
Kafka Streams: the official documentation example explained, with Spring Boot integration

1. Set up a Kafka environment (local or container)

The container setup is recommended.

docker-compose-kafka.yml

version: '3.1'
services:
  zookeeper:
    image: wurstmeister/zookeeper
    restart: always
  kafka:
    image: wurstmeister/kafka
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: localhost
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181

Deploy everything with one command: docker-compose -f docker-compose-kafka.yml up -d

To tear the environment down, use docker-compose -f docker-compose-kafka.yml down --remove-orphans (the -f flag is needed again because the file is not named docker-compose.yml).
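You can verify that both containers came up (a quick check, not part of the original write-up):

docker-compose -f docker-compose-kafka.yml ps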

2. Create the Kafka Streams application with Spring Boot

pom.xml dependencies

<properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <kafka.version>3.1.0</kafka.version>
</properties>

<dependencies>
    <!-- Apache Kafka dependencies -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-streams</artifactId>
        <version>${kafka.version}</version>
        <exclusions>
            <exclusion>
                <groupId>org.apache.kafka</groupId>
                <artifactId>kafka-clients</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>${kafka.version}</version>
    </dependency>
    <dependency>
        <groupId>org.springframework.cloud</groupId>
        <artifactId>spring-cloud-starter-stream-kafka</artifactId>
    </dependency>
</dependencies>
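Note: spring-cloud-starter-stream-kafka is declared without a version, so the Spring Cloud BOM must be imported in dependencyManagement. A minimal sketch, assuming the 2021.0.x release train (pick whichever train matches your Spring Boot version):

<dependencyManagement>
    <dependencies>
        <dependency>
            <groupId>org.springframework.cloud</groupId>
            <artifactId>spring-cloud-dependencies</artifactId>
            <version>2021.0.3</version>
            <type>pom</type>
            <scope>import</scope>
        </dependency>
    </dependencies>
</dependencyManagement>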

The code, adapted from the official WordCount demo:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.state.KeyValueStore;

import java.util.Arrays;
import java.util.Locale;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;

public class WordCount {

    public static void main(String[] args) throws Exception {
        /**
         * 1. Create a java.util.Properties map with the StreamsConfig values:
         * (1) BOOTSTRAP_SERVERS_CONFIG, a host/port pair pointing at the Kafka broker;
         * (2) APPLICATION_ID_CONFIG, a unique identifier for this Streams application;
         * (3) other settings, e.g. the default serialization/deserialization classes
         *     (serdes) for record keys and values.
         */
        Properties props = new Properties();
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        /**
         * 2. Define the computational logic of the Streams application:
         * (1) express it as a topology of connected processor nodes.
         */
        final StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> source = builder.stream("streams-plaintext-input");
        source.flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+")))
              .groupBy((key, value) -> value)
              .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"))
              .toStream()
              .to("streams-wordcount-output", Produced.with(Serdes.String(), Serdes.Long()));
        final Topology topology = builder.build();

        /**
         * (2) Hand the topology to a KafkaStreams client.
         */
        final KafkaStreams streams = new KafkaStreams(topology, props);

        /**
         * 3. Start the stream and keep it running.
         *    Calling start() triggers the execution of this client; it does not stop
         *    until close() is called on it. For example, we can add a shutdown hook
         *    with a countdown latch to catch a user interrupt (Ctrl-C) and close the
         *    client when terminating the program:
         */
        final CountDownLatch latch = new CountDownLatch(1);
        // attach shutdown handler to catch control-c
        Runtime.getRuntime().addShutdownHook(new Thread("streams-shutdown-hook") {
            @Override
            public void run() {
                streams.close();
                latch.countDown();
            }
        });

        try {
            streams.start();
            latch.await();
        } catch (Throwable e) {
            System.exit(1);
        }
        System.exit(0);
    }
}

3. Testing

(1) Create the topics

First, enter the Kafka container and change into the Kafka installation directory (the container name depends on the directory holding the compose file; check it with docker ps):

docker exec -it docker-kafka-1 sh

cd /opt/kafka_2.13-2.8.1

kafka-topics.sh --create \
    --bootstrap-server localhost:9092 \
    --replication-factor 1 \
    --partitions 1 \
    --topic streams-plaintext-input
Created topic "streams-plaintext-input"

kafka-topics.sh --create \
    --bootstrap-server localhost:9092 \
    --replication-factor 1 \
    --partitions 1 \
    --topic streams-wordcount-output \
    --config cleanup.policy=compact
Created topic "streams-wordcount-output"

List the topics to verify:

kafka-topics.sh --bootstrap-server localhost:9092 --describe

(2) Start a console producer

 kafka-console-producer.sh --bootstrap-server localhost:9092 --topic streams-plaintext-input

(3) Start a console consumer

kafka-console-consumer.sh --bootstrap-server localhost:9092 \
    --topic streams-wordcount-output \
    --from-beginning \
    --formatter kafka.tools.DefaultMessageFormatter \
    --property print.key=true \
    --property print.value=true \
    --property key.deserializer=org.apache.kafka.common.serialization.StringDeserializer \
    --property value.deserializer=org.apache.kafka.common.serialization.LongDeserializer

(4) Start the stream application

Start it from within Spring Boot, as sketched below.
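The standalone WordCount above runs as a plain main method. Below is a minimal sketch, not the author's original code, of wiring the same topology into Spring Boot as a managed bean; the class name WordCountStream is hypothetical, and the javax.annotation lifecycle annotations assume Spring Boot 2.x:

import java.util.Arrays;
import java.util.Locale;
import java.util.Properties;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.utils.Bytes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.state.KeyValueStore;
import org.springframework.stereotype.Component;

@Component
public class WordCountStream {

    private KafkaStreams streams;

    @PostConstruct
    public void start() {
        // Same configuration as the standalone WordCount example above.
        Properties props = new Properties();
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "streams-wordcount");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        // Same topology: split lines into words, group by word, count, write out.
        StreamsBuilder builder = new StreamsBuilder();
        KStream<String, String> source = builder.stream("streams-plaintext-input");
        source.flatMapValues(value -> Arrays.asList(value.toLowerCase(Locale.getDefault()).split("\\W+")))
              .groupBy((key, word) -> word)
              .count(Materialized.<String, Long, KeyValueStore<Bytes, byte[]>>as("counts-store"))
              .toStream()
              .to("streams-wordcount-output", Produced.with(Serdes.String(), Serdes.Long()));

        streams = new KafkaStreams(builder.build(), props);
        streams.start(); // non-blocking: stream threads run in the background
    }

    @PreDestroy
    public void stop() {
        // Spring invokes this on context shutdown, replacing the manual shutdown hook.
        if (streams != null) {
            streams.close();
        }
    }
}

Because Spring calls stop() when the application context shuts down, the manual shutdown hook and CountDownLatch from the standalone version are no longer needed here.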


(5) Send test data from the producer
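Type a sentence into the producer terminal, for example the line used in the official quickstart:

all streams lead to kafka

The consumer terminal should then print a running count per word, along the lines of (output reproduced from the official WordCount demo, so treat the exact formatting as approximate):

all       1
streams   1
lead      1
to        1
kafka     1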

 

Done!
