<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>3.0.0</version>
</dependency>
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;

import java.util.List;
import java.util.Map;

/**
 * Custom partitioner
 *
 * @Author: chen yang
 * @Date: 2023/5/7 11:34
 */
public class CustomerPartitioner implements Partitioner {
    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topic);
        // route by key hash; keyless records all go to partition 0
        if (key == null) {
            return 0;
        }
        // mask the sign bit so the result is always a valid partition index
        return (key.hashCode() & Integer.MAX_VALUE) % partitionInfos.size();
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }
}

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

/**
 * @Author: chen yang
 * @Date: 2023/5/6 21:59
 */
public class Producer {
    public static void main(String[] args) throws ExecutionException, InterruptedException {

        Properties properties = new Properties();
        // connect to the cluster
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        // serializer types
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // register the custom partitioner
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, CustomerPartitioner.class.getName());

        // batch.size: batch size, 16 KB by default
        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);
        // linger.ms: how long to wait before sending a batch, 0 by default
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // buffer.memory: RecordAccumulator buffer size, 32 MB by default
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432);
        // compression.type: none by default; gzip, snappy, lz4 and zstd are supported
        properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");

        // acks
        // 0: no broker acknowledgement; 1: the leader must persist the record; -1 (all): the leader and every follower in the ISR must persist it
        // type: String, valid values [all, -1, 0, 1], default: all
        properties.put(ProducerConfig.ACKS_CONFIG, "all");

        // retry count, Integer.MAX_VALUE by default
        properties.put(ProducerConfig.RETRIES_CONFIG, 3);

        // transactional id
        properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "default_transactional_id_23");

        KafkaProducer<String, String> producer = new KafkaProducer<>(properties);

        // start the transaction
        producer.initTransactions();
        producer.beginTransaction();

        try {
            // asynchronous send
            producer.send(new ProducerRecord<>("test", "this is async message"));

            producer.send(new ProducerRecord<>("test", "this is a async rollback message!"), new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    // failed sends are retried automatically; no manual retry is needed in the callback
                    if (Objects.isNull(e)) {
                        System.out.println("result: " + recordMetadata.topic() + ", partitions: " + recordMetadata.partition());
                    }
                }
            });

            // synchronous send: just call get() on the Future returned by the asynchronous send
            producer.send(new ProducerRecord<>("test", 1, "", "this is sync message")).get();
            producer.commitTransaction();

        } catch (Exception e) {
            producer.abortTransaction();
        } finally {
            producer.close();
        }
    }
}
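
A detail the transactional producer above relies on but does not show: records sent inside a transaction only become visible to consumers that read with read_committed isolation; under the default read_uncommitted, records from aborted transactions may be delivered as well. A minimal configuration sketch (the group id and variable names are illustrative assumptions, not from the original example):

Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ConsumerConfig.GROUP_ID_CONFIG, "tx_reader");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
// only deliver records from committed transactions (default: read_uncommitted)
props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
KafkaConsumer<String, String> txConsumer = new KafkaConsumer<>(props);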

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Properties;

/**
 * @Author: chen yang
 * @Date: 2023/7/9 10:37
 */
public class Consumer {

    public static void main(String[] args) {

        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_01");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            // subscribe to topics
            // ArrayList<String> topics = new ArrayList<>();
            // consumer.subscribe(topics);

            // or assign specific partitions of a topic
            ArrayList<TopicPartition> topicPartitions = new ArrayList<>();
            topicPartitions.add(new TopicPartition("night_topic", 1));
            consumer.assign(topicPartitions);

            // start consuming from a given offset
            // consumer.seek(new TopicPartition("night_topic", 1), 3);

            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(2));
            consumerRecords.forEach(System.out::println);

            // commit offsets manually
            consumer.commitAsync();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
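
The example above polls a single time, which is enough for a demo. A production consumer normally polls in a loop, commits asynchronously on each pass, and falls back to one synchronous commit on shutdown so the last processed offsets are not lost. A sketch of that standard pattern, assuming the same consumer instance as above:

try {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(2));
        records.forEach(System.out::println);
        // fast, non-blocking commit on every iteration
        consumer.commitAsync();
    }
} finally {
    try {
        // one blocking commit so the final offsets are persisted before closing
        consumer.commitSync();
    } finally {
        consumer.close();
    }
}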

<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
spring:
  kafka:
    bootstrap-servers: localhost:9092
    producer:
      batch-size: 16384
      acks: -1
      retries: 10
      transaction-id-prefix: transaction_05
      buffer-memory: 33554432
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      properties:
        linger:
          ms: 2000
        partitioner:
          class: com.night.config.CustomerPartitionHandler
    consumer:
      group-id: g_01
      enable-auto-commit: false
      auto-offset-reset: latest
      max-poll-records: 500
      # auto-commit-interval: 2000  # only takes effect when enable-auto-commit is true
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      properties:
        session:
          timeout:
            ms: 120000 # session timeout: if the consumer sends no heartbeat within this window, a rebalance is triggered
        request:
          timeout:
            ms: 18000
    listener:
      missing-topics-fatal: false # if true, application startup fails when the listened-to topics do not exist
      type: batch

import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.springframework.stereotype.Component;

import java.util.Map;

/**
 * Custom partitioner: routes records by message content.
 *
 * @Author: chen yang
 * @Date: 2023/7/8 11:02
 */
@Component
public class CustomerPartitionHandler implements Partitioner {

    @Override
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        // guard against null values before inspecting the content
        String msg = value == null ? "" : value.toString();
        if (msg.contains("二")) {
            return 2;
        } else if (msg.contains("一")) {
            return 1;
        } else {
            return 0;
        }
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> configs) {
    }
}

import lombok.RequiredArgsConstructor;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequiredArgsConstructor
public class HelloController {

    private final KafkaTemplate<String, String> kafkaTemplate;

    @GetMapping("/send")
    @Transactional // a transaction-id-prefix is configured, so sends must run inside a transaction: either this annotation or the Kafka transaction API
    public Boolean send(String msg) {
        for (int i = 0; i < 10; i++) {
            kafkaTemplate.send("night.topic", null, "night key - " + i, msg + " - " + i).addCallback(success -> {
                // success callback
                if (success == null || success.getRecordMetadata() == null) {
                    return;
                }
                String topic = success.getRecordMetadata().topic();
                int partition = success.getRecordMetadata().partition();
                long offset = success.getRecordMetadata().offset();
                String key = success.getProducerRecord().key();
                System.out.println("send topic:" + topic + ", partition: " + partition + ", key:" + key + ", offset: " + offset);
            }, failure -> {
                // failure callback
                System.out.println("failed to send message: " + failure.getMessage());
            });
        }
        return true;
    }
}
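
addCallback belongs to the ListenableFuture API of Spring Kafka 2.x; in Spring Kafka 3.x, send() returns a CompletableFuture instead. A sketch of the equivalent callback under that assumed version:

// Spring Kafka 3.x sketch: send() returns CompletableFuture<SendResult<K, V>>
kafkaTemplate.send("night.topic", "night key", "night msg").whenComplete((result, ex) -> {
    if (ex != null) {
        System.out.println("failed to send message: " + ex.getMessage());
    } else if (result != null && result.getRecordMetadata() != null) {
        System.out.println("sent to partition " + result.getRecordMetadata().partition()
                + ", offset " + result.getRecordMetadata().offset());
    }
});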

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.listener.adapter.RecordFilterStrategy;
import org.springframework.stereotype.Component;

/**
 * Filters consumed records.
 *
 * @Author: chen yang
 * @Date: 2023/7/8 12:20
 */
@Component
public class ConsumerFilterStrategy implements RecordFilterStrategy<String, String> {
    @Override
    public boolean filter(ConsumerRecord<String, String> consumerRecord) {
        // returning true discards the record ("无效数据" means "invalid data")
        return consumerRecord.value().contains("无效数据");
    }
}
import org.apache.kafka.clients.consumer.Consumer;
import org.springframework.kafka.listener.ConsumerAwareListenerErrorHandler;
import org.springframework.kafka.listener.ListenerExecutionFailedException;
import org.springframework.messaging.Message;
import org.springframework.stereotype.Component;

/**
 * Error handler for consumption failures.
 *
 * @Author: chen yang
 * @Date: 2023/7/8 11:55
 */
@Component
public class ConsumerExceptionHandler implements ConsumerAwareListenerErrorHandler {

    @Override
    public Object handleError(Message<?> message, ListenerExecutionFailedException e, Consumer<?, ?> consumer) {
        System.out.println("consumption error: " + message.getPayload() + ", ex: " + e.getMessage());
        return null;
    }
}
import lombok.RequiredArgsConstructor;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.KafkaTemplate;

import java.util.HashMap;

/**
 * @Author: chen yang
 * @Date: 2023/7/8 12:22
 */
@Configuration
@RequiredArgsConstructor
public class KafkaConsumerConfig {

    private final KafkaTemplate<String, String> kafkaTemplate;

    private final ConsumerFilterStrategy consumerFilterStrategy;

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> filterContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        // required by @SendTo; without it the console shows: a KafkaTemplate is required to support replies
        factory.setReplyTemplate(kafkaTemplate);

        factory.setConsumerFactory(initConsumerFactory());
        // concurrency should not exceed the topic's partition count; the per-poll batch size is set on the consumer factory
        factory.setConcurrency(1);

        // enable batch listening
        factory.setBatchListener(true);

        // used together with RecordFilterStrategy: filtered records are discarded
        factory.setAckDiscarded(true);
        factory.setRecordFilterStrategy(consumerFilterStrategy);
        return factory;
    }

    @Bean
    public ConsumerFactory<String, String> initConsumerFactory() {
        HashMap<String, Object> configs = new HashMap<>();
        configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        configs.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_group_id_02");
        configs.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // only takes effect when auto commit is enabled
        configs.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 10 * 1000);
        configs.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        configs.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50);
        configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
        return new DefaultKafkaConsumerFactory<>(configs);
    }
}

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.messaging.handler.annotation.SendTo;
import org.springframework.stereotype.Component;

import java.util.List;

@Component
public class HelloListener {

    @KafkaListener(topicPartitions = {
            @TopicPartition(topic = "night.topic", partitions = {"0"})
    }, errorHandler = "consumerExceptionHandler", containerFactory = "filterContainerFactory")
    public void consumer0(List<ConsumerRecord<String, String>> records, Consumer<String, String> consumer) {
        System.out.println("first consumer receive list size: " + records.size());
        records.forEach(System.out::println);
        consumer.commitSync();
    }

    @KafkaListener(topicPartitions = {
            @TopicPartition(topic = "night.topic", partitions = {"1"})
    }, errorHandler = "consumerExceptionHandler")
    @SendTo("singleTopic")
    public String consumer1(List<ConsumerRecord<String, String>> records, Consumer<String, String> consumer) {
        System.out.println("second consumer receive list size: " + records.size());
        records.forEach(System.out::println);
        consumer.commitSync();
        return "@SendTo annotation msg";
    }

    @KafkaListener(topicPartitions = {
            @TopicPartition(topic = "night.topic", partitions = {"2"})
    }, errorHandler = "consumerExceptionHandler")
    public void consumer2(List<ConsumerRecord<String, String>> records, Consumer<String, String> consumer) {
        System.out.println("third consumer receive list size: " + records.size());
        records.forEach(System.out::println);
        consumer.commitSync();
    }
}
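
The @SendTo return value from consumer1 lands on the singleTopic topic, where any plain listener can pick it up. A minimal sketch of such a listener method (the group id is an illustrative assumption; the parameter is a List because listener.type is set to batch in the configuration above):

@KafkaListener(topics = "singleTopic", groupId = "reply_reader")
public void onReply(List<String> replies) {
    replies.forEach(r -> System.out.println("reply received: " + r));
}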
