当前位置:   article > 正文

Java客户端版本:Kafka使用SASL_SSL方式加密验证_java kafka sasl

java kafka sasl

Kafka版本2.4
先上生成密钥和证书以及相关Kafka和zookeeper配置的链接
https://blog.csdn.net/qq_41527073/article/details/121148600
根据上文配置完之后就可以编写客户端了

JAVA客户端配置


import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import java.util.Map;

/**
 * @FileName: KafkaSecurityConfig.java
 * @Description: KafkaSecurityConfig.java类说明
 * @Date: 2023/2/1 17:16
 */
@Configuration
@EnableKafka
public class KafkaSecurityConfig {

    private final KafkaProperties kafkaProperties;

    /**
     * Constructor injection (preferred over field {@code @Autowired}): makes the
     * dependency explicit and the class testable.
     *
     * @param kafkaProperties Spring Boot's auto-configured Kafka properties
     */
    public KafkaSecurityConfig(KafkaProperties kafkaProperties) {
        this.kafkaProperties = kafkaProperties;
    }

    // NOTE(fix): in the original these @Bean methods lived inside a plain inner
    // class "KafkaProducerConfig" that was neither static nor annotated with
    // @Configuration, so Spring never registered the beans. They are now declared
    // directly on the outer @Configuration class.

    /**
     * Consumer-side listener container factory with SASL_SSL settings applied on
     * top of the application's configured consumer properties.
     *
     * @return a concurrent listener container factory (2 consumer threads,
     *         1500 ms poll timeout)
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        Map<String, Object> props = kafkaProperties.buildConsumerProperties();
        setSecurityConfig(props);
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(props));
        factory.setConcurrency(2);
        factory.getContainerProperties().setPollTimeout(1500);
        return factory;
    }

    /**
     * Producer-side {@link KafkaTemplate} with the same SASL_SSL settings applied
     * on top of the application's configured producer properties.
     *
     * @return a String/String KafkaTemplate backed by a secured producer factory
     */
    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        Map<String, Object> props = kafkaProperties.buildProducerProperties();
        setSecurityConfig(props);
        return new KafkaTemplate<>(new DefaultKafkaProducerFactory<>(props));
    }

    /**
     * Adds the SASL_SSL / PLAIN security properties shared by producer and consumer.
     *
     * @param props mutable Kafka client property map to augment in place
     */
    private void setSecurityConfig(Map<String, Object> props) {
        // FIX: JAAS option values must be double-quoted per the JAAS configuration
        // grammar; the original used single quotes, which the login module rejects.
        // SECURITY: credentials are hard-coded here — externalize them to
        // application config or a secret store before production use.
        props.put("sasl.jaas.config",
                "org.apache.kafka.common.security.plain.PlainLoginModule required "
                        + "username=\"kafka\" password=\"123123123\";");
        props.put("security.protocol", "SASL_SSL");
        // Empty string disables server-hostname verification — required when the
        // broker certificate's SAN/CN does not match the address being dialed
        // (e.g. self-signed certs); leave it set exactly to "".
        props.put("ssl.endpoint.identification.algorithm", "");
        props.put("sasl.mechanism", "PLAIN");
        props.put("ssl.truststore.location", "/home/admin/config/kafkaSsl/client.truststore.jks");
        props.put("ssl.truststore.password", "kafkapwd");
    }
}


注意ssl.endpoint.identification.algorithm此配置一定要留空配置

Kafka安全相关配置类如上
以上配置完成后,再完成连接Kafka服务端的配置,就可以使用密文方式传输了。

Kafka连接配置文件

  kafka:
    listener:
      # Do not fail startup when a listened-to topic does not exist yet
      missing-topics-fatal: false
    # SASL_SSL listener port (9093), not the plaintext 9092
    bootstrap-servers: 172.XX.X.XX:9093
    producer: # producer settings
      retries: 0 # number of send retries
      acks: 1 # ack level: how many partition replicas must confirm before the producer gets an ack (0, 1, all/-1)
      batch-size: 16384 # batch size in bytes
      buffer-memory: 33554432 # producer-side buffer size in bytes
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      #      value-serializer: com.itheima.demo.config.MySerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer

    consumer: # consumer settings
      group-id: 0 # default consumer group ID
      enable-auto-commit: true # whether to auto-commit offsets
      auto-commit-interval: 100  # offset commit delay (how long after receiving a message the offset is committed)

      # earliest: if a committed offset exists for a partition, consume from it; otherwise consume from the beginning
      # latest: if a committed offset exists for a partition, consume from it; otherwise consume only newly produced data
      # none: consume from committed offsets when every partition has one; throw if any partition lacks a committed offset
      auto-offset-reset: latest
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      #      value-deserializer: com.itheima.demo.config.MyDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/Gausst松鼠会/article/detail/391527
推荐阅读
相关标签
  

闽ICP备14008679号