赞
踩
说明:Linux系统默认的内核参数 vm.max_map_count(单个进程可拥有的最大内存映射区域数)通常为65530,而Elasticsearch要求该值至少为262144,否则节点无法启动。如果我们想正常启动es集群,需要调大这个内核参数(sysctl -w vm.max_map_count=262144,并写入/etc/sysctl.conf持久化)。
说明:在使用docker-compose启动多个容器时,在其默认的启动超时时间60s内无法全部启动完成,容器就会整个启动失败。这里我们将此参数调大到1000s。使用vi编辑器修改系统环境变量文件/etc/profile,在文件的末尾添加两个参数,然后执行 source /etc/profile 更新系统参数,使新添加的参数配置生效。
参数:
export DOCKER_CLIENT_TIMEOUT=1000
export COMPOSE_HTTP_TIMEOUT=1000
# elk-cluster docker-compose configuration file
#version: "3.3"
services:
  ####################### elasticsearch cluster ################
  es01:
    # image name
    image: elasticsearch:7.12.0
    # container name
    container_name: elk-cluster-es01
    hostname: es01
    # restart on boot / failure
    restart: always
    privileged: true
    # environment variables
    environment:
      # node name
      - node.name=es01
      # cluster name
      - cluster.name=elk-cluster-es
      # other nodes
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      # CORS configuration
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      # JVM heap size
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      # grant read/write ownership of mounted dirs
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    # volume mappings
    volumes:
      - /data/deploy/elk/elasticsearch/01/data:/usr/share/elasticsearch/data
      - /data/deploy/elk/elasticsearch/01/logs:/usr/share/elasticsearch/logs
    # port mappings (only es01 is exposed to the host)
    ports:
      - 9200:9200
    # network configuration
    networks:
      - elk
  es02:
    image: elasticsearch:7.12.0
    container_name: elk-cluster-es02
    hostname: es02
    restart: always
    privileged: true
    environment:
      - node.name=es02
      - cluster.name=elk-cluster-es
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      # CORS configuration
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data/deploy/elk/elasticsearch/02/data:/usr/share/elasticsearch/data
      - /data/deploy/elk/elasticsearch/02/logs:/usr/share/elasticsearch/logs
    networks:
      - elk
  es03:
    image: elasticsearch:7.12.0
    container_name: elk-cluster-es03
    hostname: es03
    restart: always
    privileged: true
    environment:
      - node.name=es03
      - cluster.name=elk-cluster-es
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      # CORS configuration
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data/deploy/elk/elasticsearch/03/data:/usr/share/elasticsearch/data
      - /data/deploy/elk/elasticsearch/03/logs:/usr/share/elasticsearch/logs
    networks:
      - elk
  ##################### kibana ####################################
  kibana:
    image: kibana:7.12.0
    container_name: elk-cluster-kibana
    hostname: kibana
    restart: always
    environment:
      # elasticsearch endpoint
      ELASTICSEARCH_HOSTS: "http://es01:9200"
      # kibana UI language: en, zh-CN, ja-JP
      I18N_LOCALE: "zh-CN"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    ports:
      - 5601:5601
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03
  ##################### nginx (reverse proxy for kibana) ##########
  nginx:
    image: nginx:stable-alpine-perl
    container_name: elk-cluster-nginx
    hostname: nginx
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
    ports:
      - 88:80
    networks:
      - elk
    depends_on:
      - kibana
  ##################### logstash ##################################
  logstash01:
    image: logstash:7.12.0
    container_name: elk-cluster-logstash01
    hostname: logstash01
    restart: always
    environment:
      # elasticsearch endpoint used for monitoring
      - monitoring.elasticsearch.hosts="http://es01:9200"
    ports:
      - 9600:9600
      - 5044:5044
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03
  logstash02:
    image: logstash:7.12.0
    container_name: elk-cluster-logstash02
    hostname: logstash02
    restart: always
    environment:
      - monitoring.elasticsearch.hosts="http://es01:9200"
    ports:
      - 9601:9600
      - 5045:5044
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03
  logstash03:
    image: logstash:7.12.0
    container_name: elk-cluster-logstash03
    hostname: logstash03
    restart: always
    environment:
      - monitoring.elasticsearch.hosts="http://es01:9200"
    ports:
      - 9602:9600
      - 5046:5044
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03
  ##################### kafka cluster #############################
  # zookeeper ensemble
  zk01:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk01
    hostname: zk01
    ports:
      - 2181:2181
    networks:
      - elk
    volumes:
      - "/data/deploy/elk/zookeeper/zk01/data:/data"
      - "/data/deploy/elk/zookeeper/zk01/logs:/datalog"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zk02:2888:3888;2181 server.3=zk03:2888:3888;2181
    depends_on:
      - es01
      - es02
      - es03
  zk02:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk02
    hostname: zk02
    ports:
      - 2182:2181
    networks:
      - elk
    volumes:
      - "/data/deploy/elk/zookeeper/zk02/data:/data"
      - "/data/deploy/elk/zookeeper/zk02/logs:/datalog"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zk01:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zk03:2888:3888;2181
    depends_on:
      - es01
      - es02
      - es03
  zk03:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk03
    hostname: zk03
    ports:
      - 2183:2181
    networks:
      - elk
    volumes:
      - "/data/deploy/elk/zookeeper/zk03/data:/data"
      - "/data/deploy/elk/zookeeper/zk03/logs:/datalog"
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zk01:2888:3888;2181 server.2=zk02:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
    depends_on:
      - es01
      - es02
      - es03
  # kafka brokers
  kafka01:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka01
    hostname: kafka01
    ports:
      - "9091:9092"
      - "9991:9991"
    networks:
      - elk
    depends_on:
      - zk01
      - zk02
      - zk03
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ADVERTISED_HOST_NAME: kafka01
      KAFKA_ADVERTISED_PORT: 9091
      KAFKA_HOST_NAME: kafka01
      KAFKA_ZOOKEEPER_CONNECT: zk01:2181,zk02:2181,zk03:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka01:9092
      # replace 10.0.168.85 with your own host IP (see note below the file)
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.0.168.85:9091
      JMX_PORT: 9991
      # fixed: removed garbled "-Dcom.sun.managementote.ssl=false" flag from the original
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka01 -Dcom.sun.management.jmxremote.port=9991 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      - "/data/deploy/elk/kafka/kafka01/:/kafka"
  kafka02:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka02
    hostname: kafka02
    ports:
      - "9092:9092"
      - "9992:9992"
    networks:
      - elk
    depends_on:
      - zk01
      - zk02
      - zk03
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_ADVERTISED_HOST_NAME: kafka02
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_HOST_NAME: kafka02
      KAFKA_ZOOKEEPER_CONNECT: zk01:2181,zk02:2181,zk03:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka02:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.0.168.85:9092
      JMX_PORT: 9992
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka02 -Dcom.sun.management.jmxremote.port=9992 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      - "/data/deploy/elk/kafka/kafka02/:/kafka"
  kafka03:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka03
    hostname: kafka03
    ports:
      - "9093:9092"
      - "9993:9993"
    networks:
      - elk
    depends_on:
      - zk01
      - zk02
      - zk03
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_ADVERTISED_HOST_NAME: kafka03
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_HOST_NAME: kafka03
      KAFKA_ZOOKEEPER_CONNECT: zk01:2181,zk02:2181,zk03:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka03:9092
      # fixed: original used 192.168.23.134 here, inconsistent with kafka01/kafka02
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.0.168.85:9093
      JMX_PORT: 9993
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka03 -Dcom.sun.management.jmxremote.port=9993 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      # fixed: original path said "kafka3", inconsistent with kafka01/kafka02
      - "/data/deploy/elk/kafka/kafka03/:/kafka"
  # kafka management UI
  kafka-manager:
    container_name: elk-cluster-kafka-manager
    image: sheepkiller/kafka-manager:stable
    restart: always
    ports:
      - 9000:9000
    networks:
      - elk
    depends_on:
      - kafka01
      - kafka02
      - kafka03
    environment:
      KM_VERSION: 1.3.3.18
      ZK_HOSTS: zk01:2181,zk02:2181,zk03:2181
  # kafka offset monitoring tool
  kafka-offset-monitor:
    container_name: elk-cluster-kafka-offset-monitor
    image: 564239555/kafkaoffsetmonitor:latest
    restart: always
    volumes:
      - /data/deploy/elk/kafkaoffsetmonitor/conf:/kafkaoffsetmonitor
    ports:
      - 9001:8080
    networks:
      - elk
    depends_on:
      - kafka01
      - kafka02
      - kafka03
    environment:
      ZK_HOSTS: zk01:2181,zk02:2181,zk03:2181
      KAFKA_BROKERS: kafka01:9092,kafka02:9092,kafka03:9092
      REFRESH_SECENDS: 10
      RETAIN_DAYS: 2
  ####################### filebeat ################
  # fixed: original spelled the service/container/hostname "filebaet"
  filebeat:
    image: elastic/filebeat:7.12.0
    container_name: elk-cluster-filebeat
    hostname: filebeat
    restart: always
    volumes:
      - /data/deploy/elk/filebeat/data:/elk/logs
    privileged: true
    user: root
    environment:
      # grant read/write ownership of mounted dirs
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    networks:
      - elk
    depends_on:
      - kafka01
      - kafka02
      - kafka03
networks:
  elk:
    driver: bridge
注意:
将yaml中的10.0.168.85修改为你自己服务器的ip
将yaml中所有volumes节点挂载的宿主机目录修改到剩余空间最大的磁盘下(注意是磁盘空间,不是内存)
docker-compose -f elk-cluster.yml -p elk-cluster up -d
# filebeat configuration: tail log files mounted at /elk/logs and ship them to kafka
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /elk/logs/*.log
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false
processors:
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
output.kafka:
  # initial brokers for reading cluster metadata
  hosts: ["kafka01:9092", "kafka02:9092", "kafka03:9092"]
  # message topic selection + partitioning
  topic: 'test'
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
说明:具体的创建过程,可以参考https://blog.csdn.net/yprufeng/article/details/115541404——kafka集群搭建中主题创建的步骤。
说明:这里我们只以elk-cluster-logstash01为例,其它logstash配置同elk-cluster-logstash01
input {
  # Option 1: receive JSON log events directly from logback over TCP
  tcp {
    port => 5044
    type => "atp"
    codec => "json_lines"
  }
  # Option 2: consume log events from the kafka cluster
  kafka {
    type => 'kafka'
    bootstrap_servers => "kafka01:9092,kafka02:9092,kafka03:9092"
    # fixed: the kafka input's `topics` option is an array
    topics => ["test"]
    group_id => "elk"
  }
}
output {
  # events received directly over TCP
  if [type] == 'atp' {
    elasticsearch {
      # elasticsearch endpoints
      hosts => ["es01:9200","es02:9200","es03:9200"]
      # daily index, named per application via the `appname` field
      index => "elk-cluster-logstash-01-%{[appname]}-%{+YYYY.MM.dd}"
    }
  }
  # events consumed from kafka
  if [type] == 'kafka' {
    elasticsearch {
      hosts => ["es01:9200","es02:9200","es03:9200"]
      index => "elk-atp-%{+YYYY.MM.dd}"
    }
  }
}
# Reverse-proxy Kibana (container "kibana", port 5601) on host port 80.
server {
listen 80;
server_name kibana;
location / {
proxy_pass http://kibana:5601;
# forward WebSocket upgrade headers so Kibana's live features keep working
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
# skip the cache on upgraded (WebSocket) requests
proxy_cache_bypass $http_upgrade;
}
}
docker-compose -f elk-cluster.yml -p elk-cluster restart
<!-- Log collection: ship application logs to the ELK stack via logstash's TCP input -->
<appender name="logstash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
<!-- NOTE(review): <param name="Encoding"> is legacy log4j-style configuration; the
     encoder's charset attribute below is what logback uses — confirm this line is needed -->
<param name="Encoding" value="UTF-8"/>
<!-- logstash TCP input host — replace with your own server IP -->
<remoteHost>10.0.168.85</remoteHost>
<!-- must match the logstash tcp input port (5044) -->
<port>5044</port>
<!-- encoder is required -->
<encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder" >
<!-- custom field; the logstash pipeline uses "appname" to build the ES index name -->
<customFields>{"appname":"xxx项目日志"}</customFields>
<!--
<pattern>${log.trace.pattern}</pattern>
-->
</encoder>
</appender>
原文链接:https://blog.csdn.net/yprufeng/article/details/115718441
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。