pom.xml:
<?xml version="1.0" encoding="UTF-8"?> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>com.kaven</groupId> <artifactId>kafka</artifactId> <version>1.0-SNAPSHOT</version> <properties> <maven.compiler.source>8</maven.compiler.source> <maven.compiler.target>8</maven.compiler.target> </properties> <dependencies> <dependency> <groupId>org.apache.kafka</groupId> <artifactId>kafka-clients</artifactId> <version>3.0.0</version> </dependency> </dependencies> </project>
Creating the topics:
package com.kaven.kafka.admin;

import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.KafkaFuture;

import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;

public class Admin {

    // Create the AdminClient instance from the Kafka broker addresses and the request timeout
    private static final AdminClient adminClient = Admin.getAdminClient(
            "192.168.1.9:9092,192.168.1.9:9093,192.168.1.9:9094", "40000");

    public static void main(String[] args) throws InterruptedException, ExecutionException {
        Admin admin = new Admin();
        // Create topic1 with 1 partition and a replication factor of 1
        admin.createTopic("topic1", 1, (short) 1);
        // Create topic2 with 2 partitions and a replication factor of 1
        admin.createTopic("topic2", 2, (short) 1);
        // Create topic3 with 2 partitions and a replication factor of 1
        admin.createTopic("topic3", 2, (short) 1);
        Thread.sleep(10000);
    }

    public static AdminClient getAdminClient(String address, String requestTimeoutMS) {
        Properties properties = new Properties();
        properties.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, address);
        properties.setProperty(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMS);
        return AdminClient.create(properties);
    }

    public void createTopic(String name, int numPartitions, short replicationFactor) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        CreateTopicsResult topics = adminClient.createTopics(
                Collections.singleton(new NewTopic(name, numPartitions, replicationFactor))
        );
        Map<String, KafkaFuture<Void>> values = topics.values();
        values.forEach((name__, future) -> {
            future.whenComplete((a, throwable) -> {
                if (throwable != null) {
                    System.out.println(throwable.getMessage());
                }
                System.out.println(name__);
                latch.countDown();
            });
        });
        latch.await();
    }
}
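To check that the topics were actually created, a small helper like the one below could be added to the Admin class. This is a minimal sketch of my own, not part of the original program; it uses the AdminClient listTopics() API and additionally needs java.util.Set imported.

// Minimal sketch: list the topic names on the cluster to verify creation.
// listTopics().names() returns a KafkaFuture<Set<String>>; get() blocks on it.
public void listTopics() throws InterruptedException, ExecutionException {
    Set<String> names = adminClient.listTopics().names().get();
    names.forEach(System.out::println);
}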
Producer publishing messages:
package com.kaven.kafka.producer;

import org.apache.kafka.clients.producer.*;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

public class ProducerTest {

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        send("topic1");
        send("topic2");
        send("topic3");
    }

    public static void send(String name) throws ExecutionException, InterruptedException {
        Producer<String, String> producer = ProducerTest.createProducer();
        for (int i = 0; i < 7; i++) {
            ProducerRecord<String, String> producerRecord = new ProducerRecord<>(
                    name,
                    "key-" + i,
                    "value-" + i
            );
            // Send asynchronously with a callback
            producer.send(producerRecord, (metadata, exception) -> {
                if (exception == null) {
                    System.out.printf("topic: %s, partition: %s, offset: %s\n",
                            name, metadata.partition(), metadata.offset());
                } else {
                    exception.printStackTrace();
                }
            });
        }
        // The Producer instance must be closed
        producer.close();
    }

    public static Producer<String, String> createProducer() {
        // Producer configuration
        Properties properties = new Properties();
        // Broker addresses
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "192.168.1.9:9092,192.168.1.9:9093,192.168.1.9:9094");
        // Serializer class for keys
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        // Serializer class for values
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringSerializer");
        return new KafkaProducer<>(properties);
    }
}
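Note that send() above declares ExecutionException and InterruptedException even though the asynchronous, callback-based send never throws them; they only matter when blocking on the Future that send() returns. As a sketch (my own variant, not the original code), the loop body could instead send synchronously like this:

// Minimal sketch: send synchronously by blocking on the Future that send() returns.
// RecordMetadata carries the partition and offset assigned to the record.
RecordMetadata metadata = producer.send(producerRecord).get();
System.out.printf("topic: %s, partition: %s, offset: %s\n",
        metadata.topic(), metadata.partition(), metadata.offset());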
Consumer subscription program:
package com.kaven.kafka.consumer;

import org.apache.kafka.clients.consumer.*;

import java.time.Duration;
import java.util.*;

public class ConsumerTest {

    public static void main(String[] args) throws InterruptedException {
        pausePartition(Arrays.asList("topic1", "topic2", "topic3"));
    }

    public static void pausePartition(List<String> topicList) throws InterruptedException {
        KafkaConsumer<String, String> consumer = createConsumer();
        consumer.subscribe(topicList);
        while (true) {
            // Poll for messages
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
            records.partitions().forEach((partition) -> {
                // The messages fetched from this partition
                List<ConsumerRecord<String, String>> recordsWithPartition = records.records(partition);
                recordsWithPartition.forEach((record) -> {
                    System.out.printf("topic: %s, partition: %s, offset: %s, key: %s, value: %s\n",
                            record.topic(), record.partition(), record.offset(), record.key(), record.value());
                });
                // Pause fetching from partition 1
                if (partition.partition() == 1) {
                    consumer.pause(Collections.singleton(partition));
                }
            });
        }
    }

    public static KafkaConsumer<String, String> createConsumer() {
        // Consumer configuration
        Properties properties = new Properties();
        // Broker addresses
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "192.168.1.9:9092,192.168.1.9:9093,192.168.1.9:9094");
        // Group ID, identifying the consumer group this consumer belongs to
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "kaven-test");
        // Enable automatic offset commits
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        // How often (in milliseconds) offsets are auto-committed to Kafka
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Deserializer class for keys
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        // Deserializer class for values
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<>(properties);
    }
}
Pausing fetching from partition 1:
// Pause fetching from partition 1
if(partition.partition() == 1) {
consumer.pause(Collections.singleton(partition));
}
Because messages are polled first and partition 1 is paused only afterwards, messages from partition 1 are fetched at the beginning; after that, only messages from partition 0 are fetched (the test topics have at most two partitions).
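If the goal were to never receive messages from partition 1 at all, the pause could be applied as soon as partitions are assigned, before the first poll returns anything. The sketch below is my own variant, not part of the original program: it replaces the plain subscribe call in pausePartition with the overload that takes a ConsumerRebalanceListener, and additionally needs org.apache.kafka.common.TopicPartition and java.util.Collection imported.

// Minimal sketch: pause partition 1 at assignment time, before any records are fetched.
consumer.subscribe(topicList, new ConsumerRebalanceListener() {
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Nothing to do on revocation in this example
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        for (TopicPartition partition : partitions) {
            if (partition.partition() == 1) {
                // Pause before the poll loop ever sees records from this partition
                consumer.pause(Collections.singleton(partition));
            }
        }
    }
});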
First create the topics, then run the Consumer subscription program, and then use the Producer to publish messages twice. The Consumer then receives the messages, with output as shown below:
// Messages from the first publish
topic: topic1, partition: 0, offset: 147, key: key-0, value: value-0
topic: topic1, partition: 0, offset: 148, key: key-1, value: value-1
topic: topic1, partition: 0, offset: 149, key: key-2, value: value-2
topic: topic1, partition: 0, offset: 150, key: key-3, value: value-3
topic: topic1, partition: 0, offset: 151, key: key-4, value: value-4
topic: topic1, partition: 0, offset: 152, key: key-5, value: value-5
topic: topic1, partition: 0, offset: 153, key: key-6, value: value-6
topic: topic2, partition: 0, offset: 84, key: key-1, value: value-1
topic: topic2, partition: 0, offset: 85, key: key-2, value: value-2
topic: topic2, partition: 0, offset: 86, key: key-5, value: value-5
topic: topic2, partition: 0, offset: 87, key: key-6, value: value-6
topic: topic2, partition: 1, offset: 63, key: key-0, value: value-0
topic: topic2, partition: 1, offset: 64, key: key-3, value: value-3
topic: topic2, partition: 1, offset: 65, key: key-4, value: value-4
topic: topic3, partition: 0, offset: 84, key: key-1, value: value-1
topic: topic3, partition: 0, offset: 85, key: key-2, value: value-2
topic: topic3, partition: 0, offset: 86, key: key-5, value: value-5
topic: topic3, partition: 0, offset: 87, key: key-6, value: value-6
topic: topic3, partition: 1, offset: 63, key: key-0, value: value-0
topic: topic3, partition: 1, offset: 64, key: key-3, value: value-3
topic: topic3, partition: 1, offset: 65, key: key-4, value: value-4

// Messages from the second publish
topic: topic1, partition: 0, offset: 154, key: key-0, value: value-0
topic: topic1, partition: 0, offset: 155, key: key-1, value: value-1
topic: topic1, partition: 0, offset: 156, key: key-2, value: value-2
topic: topic1, partition: 0, offset: 157, key: key-3, value: value-3
topic: topic1, partition: 0, offset: 158, key: key-4, value: value-4
topic: topic1, partition: 0, offset: 159, key: key-5, value: value-5
topic: topic1, partition: 0, offset: 160, key: key-6, value: value-6
topic: topic2, partition: 0, offset: 88, key: key-1, value: value-1
topic: topic2, partition: 0, offset: 89, key: key-2, value: value-2
topic: topic2, partition: 0, offset: 90, key: key-5, value: value-5
topic: topic2, partition: 0, offset: 91, key: key-6, value: value-6
topic: topic3, partition: 0, offset: 88, key: key-1, value: value-1
topic: topic3, partition: 0, offset: 89, key: key-2, value: value-2
topic: topic3, partition: 0, offset: 90, key: key-5, value: value-5
topic: topic3, partition: 0, offset: 91, key: key-6, value: value-6
The output matches expectations: after the first poll, partition 1 of topic2 and topic3 is paused, so the second publish only yields messages from partition 0.
Now modify the Consumer subscription program:
public static void pausePartition(List<String> topicList) throws InterruptedException {
    KafkaConsumer<String, String> consumer = createConsumer();
    consumer.subscribe(topicList);
    // Resume the partitions whose fetching was paused
    resume(consumer);
    while (true) {
        // Poll for messages
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
        records.partitions().forEach((partition) -> {
            // The messages fetched from this partition
            List<ConsumerRecord<String, String>> recordsWithPartition = records.records(partition);
            recordsWithPartition.forEach((record) -> {
                System.out.printf("topic: %s, partition: %s, offset: %s, key: %s, value: %s\n",
                        record.topic(), record.partition(), record.offset(), record.key(), record.value());
            });
        });
    }
}

// Resumes the partitions whose fetching was paused
public static void resume(KafkaConsumer<String, String> consumer) {
    consumer.resume(consumer.paused());
}
The following line resumes all partitions whose fetching was paused:
consumer.resume(consumer.paused());
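One caveat worth stating: pause state lives only in the consumer instance's memory and is not persisted to the broker, so on a freshly created consumer, paused() returns an empty set. This resume call therefore matters within a long-running instance that has previously paused partitions; resuming a partition that is not paused is simply a no-op.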
Run the Consumer subscription program again with the Producer not publishing anything; the Consumer still receives messages, with output as shown below (it may take a moment):
topic: topic3, partition: 1, offset: 66, key: key-0, value: value-0
topic: topic3, partition: 1, offset: 67, key: key-3, value: value-3
topic: topic3, partition: 1, offset: 68, key: key-4, value: value-4
topic: topic2, partition: 1, offset: 66, key: key-0, value: value-0
topic: topic2, partition: 1, offset: 67, key: key-3, value: value-3
topic: topic2, partition: 1, offset: 68, key: key-4, value: value-4
These are clearly the messages from the partitions whose fetching was previously paused; now that fetching has resumed, they can be consumed again. Pausing and resuming fetching from partitions can therefore serve as the basis for rate-limiting logic in Kafka, as sketched below.
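As a rough sketch of that rate-limiting idea (my own illustration, not from the programs above: the createConsumer() helper is reused, and the maxRecordsPerSecond threshold and the simple one-second window are both hypothetical choices), a rate-limited poll loop might look like this:

// Rough sketch: pause all assigned partitions once the per-second budget is used up,
// and resume the paused ones when a new one-second window starts.
public static void rateLimitedPoll(List<String> topicList, int maxRecordsPerSecond) {
    KafkaConsumer<String, String> consumer = createConsumer();
    consumer.subscribe(topicList);
    long windowStart = System.currentTimeMillis();
    int consumedInWindow = 0;
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        consumedInWindow += records.count();
        records.forEach((record) -> System.out.printf("topic: %s, partition: %s, offset: %s\n",
                record.topic(), record.partition(), record.offset()));
        long now = System.currentTimeMillis();
        if (now - windowStart >= 1000) {
            // A new window: reset the budget and resume whatever was paused
            windowStart = now;
            consumedInWindow = 0;
            consumer.resume(consumer.paused());
        } else if (consumedInWindow >= maxRecordsPerSecond) {
            // Budget exhausted: stop fetching until the next window begins
            consumer.pause(consumer.assignment());
        }
    }
}

Because the consumer keeps polling while paused, it continues to send heartbeats and stays in the consumer group, which is exactly why pause() is preferable to simply sleeping between polls.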
That concludes this introduction to pausing and resuming fetching from partitions with the Consumer. If I have gotten anything wrong, or if you see things differently, feel free to comment and share.