当前位置:   article > 正文

java代码访问kafka集群_java连接kafka集群

java连接kafka集群

本例环境:
        jdk 1.7
        zookeeper-3.4.10
        kafka_2.11-0.11
环境搭建可参考如下:
        kafka环境搭建(windows版本) : https://blog.csdn.net/zhangbeizhen18/article/details/101323691
        kafka集群环境搭建(windows版本) : https://blog.csdn.net/zhangbeizhen18/article/details/102533131
代码下载地址: https://github.com/zhangbeizhen/spring-kafka
java代码访问:

        java代码访问kafka集群和访问单例kafka使用代码相同,本例差异在于application.properties配置文件中
        bootstrap.servers配置
        访问kafka集群配置:
        bootstrap.servers=127.0.0.1:9092,127.0.0.1:9093,127.0.0.1:9094
        访问kafka单例配置:
        bootstrap.servers=127.0.0.1:9092

1.生产者代码 KafkaProducerService 

  1. @Service
  2. public class KafkaProducerService {
  3. private static final Logger logger = LoggerFactory.getLogger(KafkaProducerService.class);
  4. public static <K, T> void sendMessage(String topic, Integer partition, Long timestamp, K key, T data) {
  5. KafkaTemplate<K, T> kafkaTemplate = (KafkaTemplate<K, T>) CommonUtils.getBean("kafkaTemplate");
  6. ListenableFuture<SendResult<K, T>> listenableFuture = null;
  7. if (kafkaTemplate.getDefaultTopic().equals(topic)) {
  8. listenableFuture = kafkaTemplate.sendDefault(partition, timestamp, key, data);
  9. } else {
  10. listenableFuture = kafkaTemplate.send(topic, partition, timestamp, key, data);
  11. }
  12. /**发送成功回调*/
  13. SuccessCallback<SendResult<K, T>> successCallback = new SuccessCallback<SendResult<K, T>>() {
  14. @Override
  15. public void onSuccess(SendResult<K, T> result) {
  16. // 成功业务逻辑
  17. logger.info("生产者生产成功.");
  18. }
  19. };
  20. /**发送失败回调*/
  21. FailureCallback failureCallback = new FailureCallback() {
  22. @Override
  23. public void onFailure(Throwable e) {
  24. /**异常处理*/
  25. e.printStackTrace();
  26. throw new RuntimeException(e);
  27. }
  28. };
  29. listenableFuture.addCallback(successCallback, failureCallback);
  30. }
  31. }

2.消费者代码 KafkaConsumerService

  1. public class KafkaConsumerService implements MessageListener<String, String> {
  2. private static final Logger logger = LoggerFactory.getLogger(KafkaConsumerService.class);
  3. @Override
  4. public void onMessage(ConsumerRecord<String, String> data) {
  5. logger.info("===消费者消费前=====");
  6. if ("helloTopic".equals(data.topic())) {
  7. logger.info("消费者使用helloTopic消费信息: " + data.value());
  8. }
  9. logger.info("===消费者消费后=====");
  10. }
  11. }

3.生产者配置 kafka-provider.xml

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:context="http://www.springframework.org/schema/context"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
           http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
           http://www.springframework.org/schema/context
           http://www.springframework.org/schema/context/spring-context.xsd">

    <!-- 1. Producer configuration map; ${...} placeholders resolve from application.properties -->
    <bean id="producerProperties" class="java.util.HashMap">
        <constructor-arg>
            <map>
                <entry key="bootstrap.servers" value="${bootstrap.servers}" />
                <entry key="group.id" value="${group.id}" />
                <entry key="retries" value="${retries}" />
                <entry key="batch.size" value="${batch.size}" />
                <entry key="linger.ms" value="${linger.ms}" />
                <entry key="buffer.memory" value="${buffer.memory}" />
                <entry key="acks" value="${acks}" />
                <entry key="key.serializer"
                       value="org.apache.kafka.common.serialization.StringSerializer" />
                <entry key="value.serializer"
                       value="org.apache.kafka.common.serialization.StringSerializer" />
            </map>
        </constructor-arg>
    </bean>

    <!-- 2. Producer factory built from the configuration map above -->
    <bean id="producerFactory"
          class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
        <constructor-arg>
            <ref bean="producerProperties" />
        </constructor-arg>
    </bean>

    <!-- 3. KafkaTemplate: retrieve this bean to send messages.
         autoFlush=true flushes after every send; defaultTopic backs sendDefault(). -->
    <bean id="kafkaTemplate" class="org.springframework.kafka.core.KafkaTemplate">
        <constructor-arg ref="producerFactory" />
        <constructor-arg name="autoFlush" value="true" />
        <property name="defaultTopic" value="default" />
    </bean>
</beans>

4.消费者配置 kafka-customer.xml

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xmlns:context="http://www.springframework.org/schema/context"
       xsi:schemaLocation="http://www.springframework.org/schema/beans
           http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
           http://www.springframework.org/schema/context
           http://www.springframework.org/schema/context/spring-context.xsd">

    <!-- 1. Consumer configuration map; ${...} placeholders resolve from application.properties -->
    <bean id="consumerProperties" class="java.util.HashMap">
        <constructor-arg>
            <map>
                <entry key="bootstrap.servers" value="${bootstrap.servers}" />
                <entry key="group.id" value="${group.id}" />
                <entry key="enable.auto.commit" value="${enable.auto.commit}" />
                <entry key="session.timeout.ms" value="${session.timeout.ms}" />
                <entry key="key.deserializer"
                       value="org.apache.kafka.common.serialization.StringDeserializer" />
                <entry key="value.deserializer"
                       value="org.apache.kafka.common.serialization.StringDeserializer" />
            </map>
        </constructor-arg>
    </bean>

    <!-- 2. Consumer factory built from the configuration map above -->
    <bean id="consumerFactory"
          class="org.springframework.kafka.core.DefaultKafkaConsumerFactory" >
        <constructor-arg>
            <ref bean="consumerProperties" />
        </constructor-arg>
    </bean>

    <!-- 3. Message listener implementation (see KafkaConsumerService) -->
    <bean id="kafkaConsumerService" class="com.zbz.service.KafkaConsumerService" />

    <!-- 4. Container properties: subscribed topics plus the listener to dispatch to -->
    <bean id="containerProperties" class="org.springframework.kafka.listener.config.ContainerProperties">
        <constructor-arg name="topics">
            <list>
                <value>helloTopic</value>
            </list>
        </constructor-arg>
        <property name="messageListener" ref="kafkaConsumerService" />
    </bean>

    <!-- 5. Concurrent listener container; init-method="doStart" launches the
         consumer threads when the Spring context is created -->
    <bean id="messageListenerContainer" class="org.springframework.kafka.listener.ConcurrentMessageListenerContainer" init-method="doStart" >
        <constructor-arg ref="consumerFactory" />
        <constructor-arg ref="containerProperties" />
        <property name="concurrency" value="${concurrency}" />
    </bean>
</beans>

5.全局配置

  1. #kafka brokers配置
  2. #bootstrap.servers=127.0.0.1:9092
  3. bootstrap.servers=127.0.0.1:9092,127.0.0.1:9093,127.0.0.1:9094
  4. #即所有副本都同步到数据时send方法才返回, 以此来完全判断数据是否发送成功, 理论上来讲数据不会丢失.
  5. acks=all
  6. #发送失败重试次数
  7. retries=10
  8. #批处理条数:当多个记录被发送到同一个分区时,生产者会尝试将记录合并到更少的请求中。这有助于客户端和服务器的性能。
  9. batch.size=1638
  10. #批处理延迟时间上限:即1ms过后,不管是否达到批处理数,都直接发送一次请求
  11. linger.ms=1
  12. #即32MB的批处理缓冲区
  13. buffer.memory=33554432
  14. #消费者群组ID,发布-订阅模式,即如果一个生产者,多个消费者都要消费,那么需要定义自己的群组,同一群组内的消费者只有一个能消费到消息
  15. group.id=order-beta
  16. #如果为true,消费者的偏移量将在后台定期提交。
  17. enable.auto.commit=true
  18. #如何设置为自动提交(enable.auto.commit=true),这里设置自动提交周期
  19. auto.commit.interval.ms=1000
  20. #在使用Kafka的组管理时,用于检测消费者故障的超时
  21. session.timeout.ms=15000
  22. #消费监听器容器并发数
  23. concurrency = 5

6.测试: KafkaController 

  1. @Controller
  2. @RequestMapping("/service")
  3. public class KafkaController {
  4. @Autowired
  5. private KafkaProducerService producerService;
  6. @ResponseBody
  7. @RequestMapping(value = "kafka" ,method = RequestMethod.GET )
  8. public String service( String str) throws Exception{
  9. String result ="{\"result\" : \"success\"}";
  10. for(int i=0;i<20;i++){
  11. String kaf = "Hello Kafka,Welcome to the world." +"This is Kafka_"+ i ;
  12. producerService.sendMessage("helloTopic",0,null,"key",kaf);
  13. }
  14. return result;
  15. }
  16. }

6.1 测试URL
    http://127.0.0.1:8080/spring-kafka/service/kafka?str=test
6.2 查看topic
    在D:\aServ\kafk\kafka_2.11-0.11.0.0\bin\windows目录使用Windows Power Shell执行命令
    .\kafka-topics.bat --zookeeper 127.0.0.1:2181 --describe --topic helloTopic
    截图信息:
         
6.3 输出日志

以上,感谢.

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/小蓝xlanll/article/detail/629386
推荐阅读
相关标签
  

闽ICP备14008679号