赞
踩
本例环境:
jdk 1.7
zookeeper-3.4.10
kafka_2.11-0.11
环境搭建可参考如下:
kafka环境搭建(windows版本) : https://blog.csdn.net/zhangbeizhen18/article/details/101323691
kafka集群环境搭建(windows版本) : https://blog.csdn.net/zhangbeizhen18/article/details/102533131
代码下载地址:
https://github.com/zhangbeizhen/spring-kafka
java代码访问:
java代码访问kafka集群和访问单例kafka使用代码相同,本例差异在于application.properties配置文件中
bootstrap.servers配置
访问kafka集群配置:
bootstrap.servers=127.0.0.1:9092,127.0.0.1:9093,127.0.0.1:9094
访问kafka单例配置:
bootstrap.servers=127.0.0.1:9092
1.生产者代码 KafkaProducerService
- @Service
- public class KafkaProducerService {
- private static final Logger logger = LoggerFactory.getLogger(KafkaProducerService.class);
- public static <K, T> void sendMessage(String topic, Integer partition, Long timestamp, K key, T data) {
- KafkaTemplate<K, T> kafkaTemplate = (KafkaTemplate<K, T>) CommonUtils.getBean("kafkaTemplate");
- ListenableFuture<SendResult<K, T>> listenableFuture = null;
- if (kafkaTemplate.getDefaultTopic().equals(topic)) {
- listenableFuture = kafkaTemplate.sendDefault(partition, timestamp, key, data);
- } else {
- listenableFuture = kafkaTemplate.send(topic, partition, timestamp, key, data);
- }
- /**发送成功回调*/
- SuccessCallback<SendResult<K, T>> successCallback = new SuccessCallback<SendResult<K, T>>() {
- @Override
- public void onSuccess(SendResult<K, T> result) {
- // 成功业务逻辑
- logger.info("生产者生产成功.");
- }
- };
- /**发送失败回调*/
- FailureCallback failureCallback = new FailureCallback() {
- @Override
- public void onFailure(Throwable e) {
- /**异常处理*/
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- };
- listenableFuture.addCallback(successCallback, failureCallback);
- }
- }
2.消费者代码 KafkaConsumerService
- public class KafkaConsumerService implements MessageListener<String, String> {
- private static final Logger logger = LoggerFactory.getLogger(KafkaConsumerService.class);
- @Override
- public void onMessage(ConsumerRecord<String, String> data) {
- logger.info("===消费者消费前=====");
- if ("helloTopic".equals(data.topic())) {
- logger.info("消费者使用helloTopic消费信息: " + data.value());
- }
- logger.info("===消费者消费后=====");
- }
- }
3.生产者配置 kafka-provider.xml
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
	xsi:schemaLocation="http://www.springframework.org/schema/beans
		http://www.springframework.org/schema/beans/spring-beans.xsd
		http://www.springframework.org/schema/context
		http://www.springframework.org/schema/context/spring-context.xsd">

	<!-- 1. Producer configuration map (values resolved from application.properties).
	     NOTE: group.id was removed — it is a CONSUMER property; supplying it to a
	     producer makes Kafka log "The configuration 'group.id' was supplied but
	     isn't a known config". -->
	<bean id="producerProperties" class="java.util.HashMap">
		<constructor-arg>
			<map>
				<entry key="bootstrap.servers" value="${bootstrap.servers}" />
				<entry key="retries" value="${retries}" />
				<entry key="batch.size" value="${batch.size}" />
				<entry key="linger.ms" value="${linger.ms}" />
				<entry key="buffer.memory" value="${buffer.memory}" />
				<entry key="acks" value="${acks}" />
				<entry key="key.serializer"
					value="org.apache.kafka.common.serialization.StringSerializer" />
				<entry key="value.serializer"
					value="org.apache.kafka.common.serialization.StringSerializer" />
			</map>
		</constructor-arg>
	</bean>

	<!-- 2. Producer factory backed by the properties above. -->
	<bean id="producerFactory"
		class="org.springframework.kafka.core.DefaultKafkaProducerFactory">
		<constructor-arg>
			<ref bean="producerProperties" />
		</constructor-arg>
	</bean>

	<!-- 3. KafkaTemplate: inject/lookup this bean to send messages.
	     autoFlush=true flushes after every send (safe but slower). -->
	<bean id="kafkaTemplate" class="org.springframework.kafka.core.KafkaTemplate">
		<constructor-arg ref="producerFactory" />
		<constructor-arg name="autoFlush" value="true" />
		<property name="defaultTopic" value="default" />
	</bean>
</beans>
4.消费者配置 kafka-customer.xml
<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
	xsi:schemaLocation="http://www.springframework.org/schema/beans
		http://www.springframework.org/schema/beans/spring-beans.xsd
		http://www.springframework.org/schema/context
		http://www.springframework.org/schema/context/spring-context.xsd">

	<!-- 1. Consumer configuration map (values resolved from application.properties). -->
	<bean id="consumerProperties" class="java.util.HashMap">
		<constructor-arg>
			<map>
				<entry key="bootstrap.servers" value="${bootstrap.servers}" />
				<entry key="group.id" value="${group.id}" />
				<entry key="enable.auto.commit" value="${enable.auto.commit}" />
				<!-- auto.commit.interval.ms is defined in the properties file but was
				     never wired in here; without it the auto-commit period silently
				     falls back to the Kafka default instead of the configured value. -->
				<entry key="auto.commit.interval.ms" value="${auto.commit.interval.ms}" />
				<entry key="session.timeout.ms" value="${session.timeout.ms}" />
				<entry key="key.deserializer"
					value="org.apache.kafka.common.serialization.StringDeserializer" />
				<entry key="value.deserializer"
					value="org.apache.kafka.common.serialization.StringDeserializer" />
			</map>
		</constructor-arg>
	</bean>

	<!-- 2. Consumer factory backed by the properties above. -->
	<bean id="consumerFactory"
		class="org.springframework.kafka.core.DefaultKafkaConsumerFactory" >
		<constructor-arg>
			<ref bean="consumerProperties" />
		</constructor-arg>
	</bean>

	<!-- 3. The listener implementation that receives each record. -->
	<bean id="kafkaConsumerService" class="com.zbz.service.KafkaConsumerService" />

	<!-- 4. Container properties: subscribed topics + the message listener. -->
	<bean id="containerProperties" class="org.springframework.kafka.listener.config.ContainerProperties">
		<constructor-arg name="topics">
			<list>
				<value>helloTopic</value>
			</list>
		</constructor-arg>
		<property name="messageListener" ref="kafkaConsumerService" />
	</bean>

	<!-- 5. Concurrent listener container; doStart() begins consuming on context startup. -->
	<bean id="messageListenerContainer" class="org.springframework.kafka.listener.ConcurrentMessageListenerContainer" init-method="doStart" >
		<constructor-arg ref="consumerFactory" />
		<constructor-arg ref="containerProperties" />
		<property name="concurrency" value="${concurrency}" />
	</bean>
</beans>
5.全局配置
# Kafka broker list (cluster). For a single-node setup use the commented line instead.
#bootstrap.servers=127.0.0.1:9092
bootstrap.servers=127.0.0.1:9092,127.0.0.1:9093,127.0.0.1:9094
# acks=all: send() only succeeds once all in-sync replicas have the record —
# strongest delivery guarantee, data should not be lost in theory.
acks=all
# Number of retries on a failed send.
retries=10
# Batch size in bytes: records headed to the same partition are batched into
# fewer requests, helping both client and broker throughput.
# NOTE(review): 1638 looks like a typo for the Kafka default 16384 — confirm.
batch.size=1638
# Upper bound on batching delay: after 1 ms the batch is sent even if not full.
linger.ms=1
# Producer buffer memory: 32 MB.
buffer.memory=33554432
# Consumer group id (publish-subscribe): within one group only a single
# consumer receives each message; use distinct groups to fan out.
group.id=order-beta
# If true, consumer offsets are committed periodically in the background.
enable.auto.commit=true
# Auto-commit period, effective when enable.auto.commit=true.
auto.commit.interval.ms=1000
# Timeout used to detect consumer failures when using Kafka group management.
session.timeout.ms=15000
# Concurrency (number of consumer threads) of the listener container.
concurrency = 5
6.测试: KafkaController
- @Controller
- @RequestMapping("/service")
- public class KafkaController {
- @Autowired
- private KafkaProducerService producerService;
- @ResponseBody
- @RequestMapping(value = "kafka" ,method = RequestMethod.GET )
- public String service( String str) throws Exception{
- String result ="{\"result\" : \"success\"}";
- for(int i=0;i<20;i++){
- String kaf = "Hello Kafka,Welcome to the world." +"This is Kafka_"+ i ;
- producerService.sendMessage("helloTopic",0,null,"key",kaf);
- }
- return result;
- }
- }
6.1 测试URL
http://127.0.0.1:8080/spring-kafka/service/kafka?str=test
6.2 查看topic
在D:\aServ\kafk\kafka_2.11-0.11.0.0\bin\windows目录使用Windows PowerShell执行命令
.\kafka-topics.bat --zookeeper 127.0.0.1:2181 --describe --topic helloTopic
截图信息:
6.3 输出日志
以上,感谢.
赞
踩
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。