当前位置:   article > 正文

docker-compose 搭建 ELK_docker-compose elk

docker-compose elk

修改系统句柄数

说明:es节点要求内核参数vm.max_map_count至少为262144,而多数linux发行版的默认值只有65530。如果我们想正常启动es集群,需要先调大这个参数(执行 sysctl -w vm.max_map_count=262144,并写入/etc/sysctl.conf使其持久生效)。

在这里插入图片描述

  • 修改docker-compose容器启动时间

说明:在使用docker-compose启动多个容器时,在其默认的启动超时时间60s内无法全部启动完成,容器就会整个启动失败。这里我们将此参数调大到1000s。使用vi编辑器修改系统环境变量文件/etc/profile,在文件的末尾添加两个参数,然后更新系统参数,使新添加的参数配置生效。

参数:

export DOCKER_CLIENT_TIMEOUT=1000
export COMPOSE_HTTP_TIMEOUT=1000

在这里插入图片描述

创建elk-cluster.yml配置文件

# elk-cluster compose file (elk-cluster.yml)
#version: "3.3"

services:
  ####################### elasticsearch cluster config ################
  es01:
    # image name and tag
    image: elasticsearch:7.12.0
    # container name
    container_name: elk-cluster-es01
    hostname: es01
    # restart automatically on boot / failure
    restart: always
    privileged: true
    # environment settings
    environment:
      # node name
      - node.name=es01
      # cluster name (shared by all three es nodes)
      - cluster.name=elk-cluster-es
      # other cluster nodes
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      # enable CORS
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      # JVM heap size
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      # grant read/write ownership of the mounted data/log dirs
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    # volume mappings
    volumes:
      - /data/deploy/elk/elasticsearch/01/data:/usr/share/elasticsearch/data
      - /data/deploy/elk/elasticsearch/01/logs:/usr/share/elasticsearch/logs
    # port mapping (quoted so YAML always treats host:container as a string)
    ports:
      - "9200:9200"
    # network config
    networks:
      - elk
      
  # second elasticsearch node; same settings as es01 but no host port mapping
  es02:
    image: elasticsearch:7.12.0
    container_name: elk-cluster-es02
    hostname: es02
    restart: always
    privileged: true
    environment:
      - node.name=es02
      - cluster.name=elk-cluster-es
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      # enable CORS
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data/deploy/elk/elasticsearch/02/data:/usr/share/elasticsearch/data
      - /data/deploy/elk/elasticsearch/02/logs:/usr/share/elasticsearch/logs
    # network config
    networks:
      - elk
      
  # third elasticsearch node; same settings as es01 but no host port mapping
  es03:
    image: elasticsearch:7.12.0
    container_name: elk-cluster-es03
    hostname: es03
    restart: always
    privileged: true
    environment:
      - node.name=es03
      - cluster.name=elk-cluster-es
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      # enable CORS
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /data/deploy/elk/elasticsearch/03/data:/usr/share/elasticsearch/data
      - /data/deploy/elk/elasticsearch/03/logs:/usr/share/elasticsearch/logs
    # network config (original comment wrongly said "port mapping")
    networks:
      - elk
      
  ##################### kibana config ####################################
  kibana:
    image: kibana:7.12.0
    container_name: elk-cluster-kibana
    hostname: kibana
    restart: always
    environment:
      # elasticsearch endpoint kibana connects to
      ELASTICSEARCH_HOSTS: "http://es01:9200"
      # kibana UI language: en, zh-CN, ja-JP
      I18N_LOCALE: "zh-CN"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    # port mapping (quoted so YAML always treats host:container as a string)
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03
      
  ##################### nginx reverse proxy config ####################
  # (original header comment wrongly said "kibana config" — this is nginx,
  # which fronts kibana on host port 88)
  nginx:
    image: nginx:stable-alpine-perl
    container_name: elk-cluster-nginx
    hostname: nginx
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
    # host port 88 -> container port 80 (quoted to avoid YAML numeric parsing)
    ports:
      - "88:80"
    networks:
      - elk
    depends_on:
      - kibana
      
  ##################### logstash config ####################################
  logstash01:
    image: logstash:7.12.0
    container_name: elk-cluster-logstash01
    hostname: logstash01
    restart: always
    environment:
      # elasticsearch address for logstash monitoring.
      # Fixed two problems with the original
      # `- monitoring.elasticsearch.hosts="http://es01:9200"`:
      # 1) in list-form `environment` everything after `=` is literal, so the
      #    double quotes were injected into the value;
      # 2) the official logstash image maps settings from UPPER_CASE env vars
      #    (env2yaml) onto logstash.yml — lower-case dotted names are ignored.
      - MONITORING_ELASTICSEARCH_HOSTS=http://es01:9200
    # 9600: logstash monitoring API; 5044: tcp/beats input
    ports:
      - "9600:9600"
      - "5044:5044"
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03

  logstash02:
    image: logstash:7.12.0
    container_name: elk-cluster-logstash02
    hostname: logstash02
    restart: always
    environment:
      # see note on logstash01: upper-case env var form, no embedded quotes
      - MONITORING_ELASTICSEARCH_HOSTS=http://es01:9200
    ports:
      - "9601:9600"
      - "5045:5044"
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03

  logstash03:
    image: logstash:7.12.0
    container_name: elk-cluster-logstash03
    hostname: logstash03
    restart: always
    environment:
      # see note on logstash01: upper-case env var form, no embedded quotes
      - MONITORING_ELASTICSEARCH_HOSTS=http://es01:9200
    ports:
      - "9602:9600"
      - "5046:5044"
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03
      
  ##################### kafka cluster related config ####################################
  # zookeeper ensemble (3 nodes) backing the kafka brokers
  zk01:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk01
    hostname: zk01
    ports:
      - 2181:2181
    networks:
      - elk
    volumes:
      - "/data/deploy/elk/zookeeper/zk01/data:/data"
      - "/data/deploy/elk/zookeeper/zk01/logs:/datalog"
    environment:
      # unique id of this node within the ensemble (matches server.N below)
      ZOO_MY_ID: 1
      # ensemble membership; this node binds 0.0.0.0 for its own entry
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zk02:2888:3888;2181 server.3=zk03:2888:3888;2181
    # NOTE(review): zookeeper has no functional dependency on elasticsearch;
    # depends_on here only forces compose startup ordering — confirm intended.
    depends_on:
      - es01
      - es02
      - es03

  zk02:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk02
    hostname: zk02
    ports:
      - 2182:2181
    networks:
      - elk
    volumes:
      - "/data/deploy/elk/zookeeper/zk02/data:/data"
      - "/data/deploy/elk/zookeeper/zk02/logs:/datalog"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zk01:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zk03:2888:3888;2181
    depends_on:
      - es01
      - es02
      - es03

  zk03:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk03
    hostname: zk03
    ports:
      - 2183:2181
    networks:
      - elk
    volumes:
      - "/data/deploy/elk/zookeeper/zk03/data:/data"
      - "/data/deploy/elk/zookeeper/zk03/logs:/datalog"
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zk01:2888:3888;2181 server.2=zk02:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
    depends_on:
      - es01
      - es02
      - es03
 
  # kafka cluster (3 brokers)
  kafka01:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka01
    hostname: kafka01
    ports:
      - "9091:9092"
      - "9991:9991"
    networks:
      - elk
    depends_on:
      - zk01
      - zk02
      - zk03
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ADVERTISED_HOST_NAME: kafka01
      KAFKA_ADVERTISED_PORT: 9091
      KAFKA_HOST_NAME: kafka01
      KAFKA_ZOOKEEPER_CONNECT: zk01:2181,zk02:2181,zk03:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka01:9092
      # replace 10.0.168.85 with the host machine's IP
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.0.168.85:9091
      JMX_PORT: 9991
      # fixed: removed the garbled duplicate flag "-Dcom.sun.managementote.ssl=false"
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka01 -Dcom.sun.management.jmxremote.port=9991 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      - "/data/deploy/elk/kafka/kafka01/:/kafka"

  kafka02:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka02
    hostname: kafka02
    ports:
      - "9092:9092"
      - "9992:9992"
    networks:
      - elk
    depends_on:
      - zk01
      - zk02
      - zk03
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_ADVERTISED_HOST_NAME: kafka02
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_HOST_NAME: kafka02
      KAFKA_ZOOKEEPER_CONNECT: zk01:2181,zk02:2181,zk03:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka02:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.0.168.85:9092
      JMX_PORT: 9992
      # fixed: removed the garbled duplicate flag "-Dcom.sun.managementote.ssl=false"
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka02 -Dcom.sun.management.jmxremote.port=9992 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      - "/data/deploy/elk/kafka/kafka02/:/kafka"

  kafka03:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka03
    hostname: kafka03
    ports:
      - "9093:9092"
      - "9993:9993"
    networks:
      - elk
    depends_on:
      - zk01
      - zk02
      - zk03
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_ADVERTISED_HOST_NAME: kafka03
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_HOST_NAME: kafka03
      KAFKA_ZOOKEEPER_CONNECT: zk01:2181,zk02:2181,zk03:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka03:9092
      # fixed: was 192.168.23.134, inconsistent with the other brokers — all
      # advertised listeners must use the same host IP (see article note)
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.0.168.85:9093
      JMX_PORT: 9993
      # fixed: removed the garbled duplicate flag "-Dcom.sun.managementote.ssl=false"
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka03 -Dcom.sun.management.jmxremote.port=9993 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      # fixed: was "kafka3" — now consistent with kafka01/kafka02
      - "/data/deploy/elk/kafka/kafka03/:/kafka"
 
  # kafka management UI
  kafka-manager:
    image: sheepkiller/kafka-manager:stable
    container_name: elk-cluster-kafka-manager
    restart: always
    ports:
      - 9000:9000
    networks:
      - elk
    depends_on:
      - kafka01
      - kafka02
      - kafka03
    environment:
      KM_VERSION: 1.3.3.18
      # zookeeper ensemble the manager reads cluster state from
      ZK_HOSTS: zk01:2181,zk02:2181,zk03:2181
 
  # kafka monitoring tool (consumer offset monitor)
  'kafka-offset-monitor':
    container_name: elk-cluster-kafka-offset-monitor
    image: 564239555/kafkaoffsetmonitor:latest
    restart: always
    volumes:
      - /data/deploy/elk/kafkaoffsetmonitor/conf:/kafkaoffsetmonitor
    ports:
      - 9001:8080
    networks:
      - elk
    depends_on:
      - kafka01
      - kafka02
      - kafka03
    environment:
      ZK_HOSTS: zk01:2181,zk02:2181,zk03:2181
      KAFKA_BROKERS: kafka01:9092,kafka02:9092,kafka03:9092
      # NOTE(review): "SECENDS" looks like a typo for "SECONDS", but it may be
      # the literal variable name this image reads — verify against the image
      # docs before renaming.
      REFRESH_SECENDS: 10
      RETAIN_DAYS: 2
      
  ####################### filebeat config ################
  # fixed: service/container/host names were misspelled "filebaet"
  filebeat:
    # image name
    image: elastic/filebeat:7.12.0
    # container name
    container_name: elk-cluster-filebeat
    hostname: filebeat
    # restart automatically on boot / failure
    restart: always
    volumes:
      - /data/deploy/elk/filebeat/data:/elk/logs
    # privilege settings
    privileged: true
    # run as root so filebeat can read the mounted logs
    user: root
    # environment settings
    environment:
      # grant read/write ownership of mounted paths
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    # network config
    networks:
      - elk
    depends_on:
      - kafka01
      - kafka02
      - kafka03

networks:
  elk:
    driver: bridge

  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37
  • 38
  • 39
  • 40
  • 41
  • 42
  • 43
  • 44
  • 45
  • 46
  • 47
  • 48
  • 49
  • 50
  • 51
  • 52
  • 53
  • 54
  • 55
  • 56
  • 57
  • 58
  • 59
  • 60
  • 61
  • 62
  • 63
  • 64
  • 65
  • 66
  • 67
  • 68
  • 69
  • 70
  • 71
  • 72
  • 73
  • 74
  • 75
  • 76
  • 77
  • 78
  • 79
  • 80
  • 81
  • 82
  • 83
  • 84
  • 85
  • 86
  • 87
  • 88
  • 89
  • 90
  • 91
  • 92
  • 93
  • 94
  • 95
  • 96
  • 97
  • 98
  • 99
  • 100
  • 101
  • 102
  • 103
  • 104
  • 105
  • 106
  • 107
  • 108
  • 109
  • 110
  • 111
  • 112
  • 113
  • 114
  • 115
  • 116
  • 117
  • 118
  • 119
  • 120
  • 121
  • 122
  • 123
  • 124
  • 125
  • 126
  • 127
  • 128
  • 129
  • 130
  • 131
  • 132
  • 133
  • 134
  • 135
  • 136
  • 137
  • 138
  • 139
  • 140
  • 141
  • 142
  • 143
  • 144
  • 145
  • 146
  • 147
  • 148
  • 149
  • 150
  • 151
  • 152
  • 153
  • 154
  • 155
  • 156
  • 157
  • 158
  • 159
  • 160
  • 161
  • 162
  • 163
  • 164
  • 165
  • 166
  • 167
  • 168
  • 169
  • 170
  • 171
  • 172
  • 173
  • 174
  • 175
  • 176
  • 177
  • 178
  • 179
  • 180
  • 181
  • 182
  • 183
  • 184
  • 185
  • 186
  • 187
  • 188
  • 189
  • 190
  • 191
  • 192
  • 193
  • 194
  • 195
  • 196
  • 197
  • 198
  • 199
  • 200
  • 201
  • 202
  • 203
  • 204
  • 205
  • 206
  • 207
  • 208
  • 209
  • 210
  • 211
  • 212
  • 213
  • 214
  • 215
  • 216
  • 217
  • 218
  • 219
  • 220
  • 221
  • 222
  • 223
  • 224
  • 225
  • 226
  • 227
  • 228
  • 229
  • 230
  • 231
  • 232
  • 233
  • 234
  • 235
  • 236
  • 237
  • 238
  • 239
  • 240
  • 241
  • 242
  • 243
  • 244
  • 245
  • 246
  • 247
  • 248
  • 249
  • 250
  • 251
  • 252
  • 253
  • 254
  • 255
  • 256
  • 257
  • 258
  • 259
  • 260
  • 261
  • 262
  • 263
  • 264
  • 265
  • 266
  • 267
  • 268
  • 269
  • 270
  • 271
  • 272
  • 273
  • 274
  • 275
  • 276
  • 277
  • 278
  • 279
  • 280
  • 281
  • 282
  • 283
  • 284
  • 285
  • 286
  • 287
  • 288
  • 289
  • 290
  • 291
  • 292
  • 293
  • 294
  • 295
  • 296
  • 297
  • 298
  • 299
  • 300
  • 301
  • 302
  • 303
  • 304
  • 305
  • 306
  • 307
  • 308
  • 309
  • 310
  • 311
  • 312
  • 313
  • 314
  • 315
  • 316
  • 317
  • 318
  • 319
  • 320
  • 321
  • 322
  • 323
  • 324
  • 325
  • 326
  • 327
  • 328
  • 329
  • 330
  • 331
  • 332
  • 333
  • 334
  • 335
  • 336
  • 337
  • 338
  • 339
  • 340
  • 341
  • 342
  • 343
  • 344
  • 345
  • 346
  • 347
  • 348
  • 349
  • 350
  • 351
  • 352
  • 353
  • 354
  • 355
  • 356
  • 357
  • 358
  • 359
  • 360
  • 361
  • 362
  • 363
  • 364
  • 365
  • 366
  • 367
  • 368
  • 369
  • 370
  • 371
  • 372
  • 373
  • 374
  • 375
  • 376
  • 377
  • 378
  • 379
  • 380
  • 381
  • 382
  • 383
  • 384
  • 385
  • 386
  • 387
  • 388
  • 389
  • 390
  • 391
  • 392
  • 393
  • 394
  • 395
  • 396
  • 397
  • 398
  • 399
  • 400
  • 401
  • 402
  • 403
  • 404
  • 405
  • 406
  • 407
  • 408
  • 409
  • 410
  • 411
  • 412
  • 413
  • 414
  • 415
  • 416
  • 417
  • 注意:

  • 将yaml中的10.0.168.85修改为你自己服务器的ip

  • 将yaml中所有的卷volumes节点挂的目录修改到内存最大的磁盘下

启动elk集群

docker-compose -f elk-cluster.yml -p elk-cluster up -d
  • 1

在这里插入图片描述

修改filebeat配置文件

# tail *.log files from the directory mounted into the container and ship
# them to the kafka cluster (topic "test")
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /elk/logs/*.log

# load module configs; hot reload disabled
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false

# enrich events with cloud / docker metadata when available
processors:
  - add_cloud_metadata: ~
  - add_docker_metadata: ~

output.kafka:
  # initial brokers for reading cluster metadata
  hosts: ["kafka01:9092", "kafka02:9092", "kafka03:9092"]

  # message topic selection + partitioning
  topic: 'test'
  partition.round_robin:
    reachable_only: false

  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27

在这里插入图片描述

创建kafka的主题test

说明:具体的创建过程,可以参考https://blog.csdn.net/yprufeng/article/details/115541404——kafka集群搭建中主题创建的步骤。
在这里插入图片描述

修改logstash配置文件

说明:这里我们只以elk-cluster-logstash01为例,其它logstash配置同elk-cluster-logstash01

input {
  # option 1: receive JSON log lines pushed directly from logback over TCP
  tcp {
    port => 5044
    type => "atp"
    codec => "json_lines"
  }
  # option 2: consume log events from the kafka cluster
  kafka {
    type => "kafka"
    bootstrap_servers => "kafka01:9092,kafka02:9092,kafka03:9092"
    topics => "test"
    group_id => "elk"
  }
}

output {
  # events that arrived directly over TCP
  if [type] == "atp" {
    elasticsearch {
      # elasticsearch addresses
      hosts => ["es01:9200","es02:9200","es03:9200"]
      # index name pattern
      index => "elk-cluster-logstash-01-%{[appname]}-%{+YYYY.MM.dd}"
    }
  }
  # events consumed from kafka
  if [type] == "kafka" {
    elasticsearch {
      # elasticsearch addresses
      hosts => ["es01:9200","es02:9200","es03:9200"]
      # index name pattern
      index => "elk-atp-%{+YYYY.MM.dd}"
    }
  }
}
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14
  • 15
  • 16
  • 17
  • 18
  • 19
  • 20
  • 21
  • 22
  • 23
  • 24
  • 25
  • 26
  • 27
  • 28
  • 29
  • 30
  • 31
  • 32
  • 33
  • 34
  • 35
  • 36
  • 37

在这里插入图片描述

修改nginx配置

# reverse proxy: expose the kibana container (port 5601) through nginx,
# with WebSocket upgrade headers so the kibana UI works behind the proxy
server {
        listen 80;
        server_name kibana;
        location / {
                proxy_pass http://kibana:5601;
                proxy_http_version 1.1;
                # pass WebSocket upgrade handshake through
                proxy_set_header Upgrade $http_upgrade;
                proxy_set_header Connection 'upgrade';
                proxy_set_header Host $host;
                # bypass cache for upgraded (WebSocket) connections
                proxy_cache_bypass $http_upgrade;
        }
}
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12

在这里插入图片描述

重启服务

docker-compose -f elk-cluster.yml -p elk-cluster restart

在这里插入图片描述

logBack配置

<!-- ELK log shipping: send application logs to logstash over TCP (port 5044) -->
    <appender name="logstash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!-- NOTE(review): <param name="Encoding"> is legacy logback syntax and
             likely a no-op here; the encoder charset below governs — confirm -->
        <param name="Encoding" value="UTF-8"/>
        <!-- replace with your logstash host IP -->
        <remoteHost>10.0.168.85</remoteHost>
        <port>5044</port>
        <!-- encoder is required -->
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder" >
            <!-- "appname" is interpolated into the elasticsearch index name
                 by the logstash pipeline -->
            <customFields>{"appname":"xxx项目日志"}</customFields>
            <!-- 
            <pattern>${log.trace.pattern}</pattern>
            -->
        </encoder>
    </appender>
  • 1
  • 2
  • 3
  • 4
  • 5
  • 6
  • 7
  • 8
  • 9
  • 10
  • 11
  • 12
  • 13
  • 14

kibana配置

在这里插入图片描述
在这里插入图片描述


在这里插入图片描述
在这里插入图片描述

原文链接:https://blog.csdn.net/yprufeng/article/details/115718441

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/小惠珠哦/article/detail/992902
推荐阅读
相关标签
  

闽ICP备14008679号