Parameters that must be specified when creating a KafkaProducer, and what they mean:
| Parameter | Description |
| --- | --- |
| bootstrap.servers | How the producer establishes its initial connection to the brokers. This is only a bootstrap list: when connecting to a Kafka cluster, configure the addresses of a few of the brokers rather than all of them; once the producer has connected to one of the listed brokers, it discovers the other nodes in the cluster through that connection. |
| key.serializer | Serializer class for the message key. It can be set either as the class name string or as the Class object itself. |
| value.serializer | Serializer class for the message value. It can be set either as the class name string or as the Class object itself. |
| acks | Default: all. acks=0: the producer does not wait for any acknowledgment from the broker. acks=1: the leader writes the record to its local log and responds without waiting for the followers. acks=all: the leader waits until all in-sync replicas have acknowledged the record, giving the strongest durability guarantee. |
| retries | How many times the producer retries a send that failed with a transient error. |
Other parameters can be found in org.apache.kafka.clients.producer.ProducerConfig.
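The producer example below uses the raw string keys; to avoid typos you can use the constants defined in ProducerConfig instead, exactly as the consumer example further down does with ConsumerConfig. A minimal sketch (inside main(), replacing the string-keyed puts):

```java
// Same configuration using ProducerConfig constants instead of raw strings
Map<String, Object> configs = new HashMap<>();
configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.80.121:9092");
configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
configs.put(ProducerConfig.ACKS_CONFIG, "all"); // wait for all in-sync replicas
configs.put(ProducerConfig.RETRIES_CONFIG, 3);  // retry transient failures 3 times
```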
Producer:
```java
package com.example.producer;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class MyProducer1 {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Map<String, Object> configs = new HashMap<>();
        // Broker address used for the initial connection; with a cluster,
        // the other brokers are discovered through this connection
        configs.put("bootstrap.servers", "192.168.80.121:9092");
        // Serializer class for the key
        configs.put("key.serializer", IntegerSerializer.class);
        // Serializer class for the value
        configs.put("value.serializer", StringSerializer.class);

        /* configs.put("acks", "all");
        configs.put("retries", 3); */

        // Create the KafkaProducer object
        KafkaProducer<Integer, String> producer = new KafkaProducer<Integer, String>(configs);

        // Custom message header fields
        List<Header> headers = new ArrayList<>();
        headers.add(new RecordHeader("biz.name", "producer.demo".getBytes()));

        // Build the message
        ProducerRecord<Integer, String> record = new ProducerRecord<Integer, String>(
                "topic_1",      // topic name
                0,              // partition number; there is only one partition, so 0
                0,              // an integer as the key
                "hello world",  // a string as the value
                headers
        );

        // Synchronous send confirmation
        /* Future<RecordMetadata> future = producer.send(record);
        RecordMetadata metadata = future.get();
        System.out.println("Topic: " + metadata.topic());
        System.out.println("Partition: " + metadata.partition());
        System.out.println("Offset: " + metadata.offset()); */

        // Asynchronous send confirmation
        producer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception e) {
                if (e == null) {
                    System.out.println("Topic: " + metadata.topic());
                    System.out.println("Partition: " + metadata.partition());
                    System.out.println("Offset: " + metadata.offset());
                } else {
                    System.out.println("Send failed: " + e.getMessage());
                }
            }
        });

        // Close the producer
        producer.close();
    }
}
```
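Note that producer.close() blocks until all buffered records have been sent. If you want to bound that wait, kafka-clients 2.0+ also offers a Duration overload; a minimal sketch (requires java.time.Duration):

```java
// Bound the shutdown wait to 5 seconds instead of blocking indefinitely
producer.close(Duration.ofSeconds(5));
```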
Consumer:
```java
package com.example.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

public class MyConsumer {
    public static void main(String[] args) {
        Map<String, Object> configs = new HashMap<>();
        // Broker address used for the initial connection
        // configs.put("bootstrap.servers", "192.168.80.121:9092");
        // If you are worried about mistyping the string key above, use the ConsumerConfig constants instead
        configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.80.121:9092");
        // Deserializer class for the key
        configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        // Deserializer class for the value
        configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        // Consumer group id
        configs.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_demo");
        // earliest: if no valid offset exists for this consumer, reset to the beginning
        // latest: reset to the end of the partition instead
        configs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        KafkaConsumer<Integer, String> consumer = new KafkaConsumer<Integer, String>(configs);

        // Subscribe first, then consume
        consumer.subscribe(Arrays.asList("topic_1"));

        /* while (true) {
            ConsumerRecords<Integer, String> consumerRecords = consumer.poll(Duration.ofSeconds(3));
        } */
        // If the topic has no consumable messages yet, the poll can be placed in a while loop
        // that re-polls every 3 seconds; the timeout keeps the loop from calling poll() too densely

        // Pull a batch of messages from the topic's partitions;
        // the Duration is the maximum time to block waiting for records, not an interval
        ConsumerRecords<Integer, String> consumerRecords = consumer.poll(Duration.ofSeconds(3));

        // Iterate over the batch pulled in this poll
        consumerRecords.forEach(new Consumer<ConsumerRecord<Integer, String>>() {
            @Override
            public void accept(ConsumerRecord<Integer, String> record) {
                System.out.println("========================================");
                System.out.println("Headers: " + Arrays.toString(record.headers().toArray()));
                System.out.println("Key: " + record.key());
                System.out.println("Offset: " + record.offset());
                System.out.println("Partition: " + record.partition());
                System.out.println("Serialized key size (bytes): " + record.serializedKeySize());
                System.out.println("Serialized value size (bytes): " + record.serializedValueSize());
                System.out.println("Timestamp: " + record.timestamp());
                System.out.println("Timestamp type: " + record.timestampType());
                System.out.println("Topic: " + record.topic());
                System.out.println("Value: " + record.value());
            }
        });

        consumer.close();
    }
}
```
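The commented-out loop above only hints at continuous consumption; a minimal sketch of the full loop, assuming the same configs map and topic as in MyConsumer:

```java
// Continuous consumption: poll in a loop, print each record, close cleanly
KafkaConsumer<Integer, String> consumer = new KafkaConsumer<>(configs);
consumer.subscribe(Arrays.asList("topic_1"));
try {
    while (true) {
        // Block for up to 3 seconds waiting for records, then process whatever arrived
        ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofSeconds(3));
        for (ConsumerRecord<Integer, String> record : records) {
            System.out.println(record.topic() + "\t" + record.partition()
                    + "\t" + record.offset() + "\t" + record.value());
        }
    }
} finally {
    consumer.close();
}
```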
(1) pom.xml
```xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.2.8.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>com.example</groupId>
    <artifactId>demo_02_springboot-kafka</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <name>demo_02_springboot-kafka</name>
    <description>demo_02_springboot-kafka</description>
    <properties>
        <java.version>8</java.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
            <exclusions>
                <exclusion>
                    <groupId>org.junit.vintage</groupId>
                    <artifactId>junit-vintage-engine</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka-test</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
        </plugins>
    </build>

</project>
```
(2) application.properties

```properties
spring.application.name=springboot-kafka-02
server.port=8080
# Broker address used for the initial connection
spring.kafka.bootstrap-servers=192.168.80.121:9092
# Key and value serializer classes used by the producer
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.IntegerSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
# Default batch size in bytes
spring.kafka.producer.batch-size=16384
# 32 MB total send buffer
spring.kafka.producer.buffer-memory=33554432
# Key and value deserializer classes used by the consumer
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.IntegerDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
# Consumer group id
spring.kafka.consumer.group-id=springboot-consumer02
# Whether consumer offsets are committed automatically
spring.kafka.consumer.enable-auto-commit=true
# Commit offsets to the broker every 100 ms
spring.kafka.consumer.auto-commit-interval=100
# If no offset exists for this consumer, reset to the earliest offset
spring.kafka.consumer.auto-offset-reset=earliest
```
(3) Startup class
```java
package com.example.demo;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

@SpringBootApplication
public class Demo02SpringbootKafkaApplication {

    public static void main(String[] args) {
        SpringApplication.run(Demo02SpringbootKafkaApplication.class, args);
    }

}
```
(4) KafkaConfig (topics can be created here, a custom KafkaAdmin can be defined, and so on; the class can also be omitted, because KafkaAdmin automatically creates any NewTopic beans declared here that do not yet exist on the broker)
```java
package com.example.demo.config;

import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class KafkaConfig {

    // Create a topic: name, number of partitions, replication factor
    @Bean
    public NewTopic topic1() {
        return new NewTopic("nptc-01", 3, (short) 1);
    }

    @Bean
    public NewTopic topic2() {
        return new NewTopic("nptc-02", 5, (short) 1);
    }

}
```
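Since spring-kafka 2.3 (which the Boot 2.2.8 parent above pulls in) there is also a fluent TopicBuilder; a sketch of an equivalent bean (the topic name nptc-03 is just an example):

```java
import org.springframework.kafka.config.TopicBuilder;

// Equivalent to new NewTopic("nptc-03", 3, (short) 1)
@Bean
public NewTopic topic3() {
    return TopicBuilder.name("nptc-03")
            .partitions(3)
            .replicas(1)
            .build();
}
```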
(5) Producer
Synchronous send
```java
package com.example.demo.controller;

import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.concurrent.ExecutionException;

@RestController
public class KafkaSyncProducerController {
    @Autowired
    private KafkaTemplate<Integer, String> template;

    @RequestMapping("send/sync/{message}")
    public String send(@PathVariable String message) throws ExecutionException, InterruptedException {
        ListenableFuture<SendResult<Integer, String>> future = template.send("topic-spring-01", 0, 0, message);

        // Send synchronously: block until the broker acknowledges
        SendResult<Integer, String> sendResult = future.get();
        RecordMetadata metadata = sendResult.getRecordMetadata();
        System.out.println(metadata.topic() + "\t" + metadata.partition() + "\t" + metadata.offset());
        return "success";
    }

}
```
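With the application running, requesting http://localhost:8080/send/sync/hello (in a browser or with curl) sends "hello" to partition 0 of topic-spring-01 with key 0, prints the returned metadata to the console, and answers "success".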
Asynchronous send with a callback
```java
package com.example.demo.controller;

import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
public class KafkaAsyncProducerController {
    @Autowired
    private KafkaTemplate<Integer, String> template;

    @RequestMapping("send/async/{message}")
    public String send(@PathVariable String message) {
        // In spring-kafka 2.x, send() already returns a ListenableFuture; no cast is needed
        ListenableFuture<SendResult<Integer, String>> future = template.send("topic-spring-01", 0, 1, message);

        future.addCallback(new ListenableFutureCallback<SendResult<Integer, String>>() {
            @Override
            public void onFailure(Throwable ex) {
                System.out.println("Send failed: " + ex.getMessage());
            }

            @Override
            public void onSuccess(SendResult<Integer, String> result) {
                RecordMetadata metadata = result.getRecordMetadata();
                System.out.println("Send succeeded: " + metadata.topic() + "\t" + metadata.partition() + "\t" + metadata.offset());
            }
        });

        return "success";
    }

}
```
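Note that in spring-kafka 3.x (Spring Boot 3) KafkaTemplate.send() returns a CompletableFuture instead of a ListenableFuture, so the callback is attached differently. A sketch of the same logic against that newer API:

```java
// spring-kafka 3.x variant: send() returns a CompletableFuture
CompletableFuture<SendResult<Integer, String>> future =
        template.send("topic-spring-01", 0, 1, message);
future.whenComplete((result, ex) -> {
    if (ex == null) {
        RecordMetadata metadata = result.getRecordMetadata();
        System.out.println("Send succeeded: " + metadata.topic()
                + "\t" + metadata.partition() + "\t" + metadata.offset());
    } else {
        System.out.println("Send failed: " + ex.getMessage());
    }
});
```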
(6) Consumer
```java
package com.example.demo.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

@Component
public class MyConsumer {
    // Invoked for every record received on topic-spring-01
    @KafkaListener(topics = "topic-spring-01")
    public void onMessage(ConsumerRecord<Integer, String> record) {
        System.out.println("Received: " + record.topic() + "\t" + record.partition() + "\t" + record.offset() + "\t" + record.key() + "\t" + record.value());
    }
}
```
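If the listener only needs the key and value, spring-kafka can inject them directly instead of the whole ConsumerRecord. A sketch (the class name MyPayloadConsumer is made up here; the header constant is the spring-kafka 2.x name, renamed RECEIVED_KEY in 3.0):

```java
package com.example.demo.consumer;

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.stereotype.Component;

@Component
public class MyPayloadConsumer {
    // Receives just the key and value instead of the whole ConsumerRecord
    @KafkaListener(topics = "topic-spring-01")
    public void onMessage(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) Integer key,
                          @Payload String value) {
        System.out.println("key=" + key + ", value=" + value);
    }
}
```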