
ELK: Collecting Logs with Filebeat (Filebeat 7.12)

1. Deploying Filebeat

Install Filebeat on 10.0.0.37:

apt install -y openjdk-8-jdk   # only needed if Logstash/Elasticsearch also run on this host; Filebeat itself is written in Go and needs no JDK

# Copy filebeat-7.12.1-amd64.deb to /usr/local/src, then install it
dpkg -i /usr/local/src/filebeat-7.12.1-amd64.deb
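A quick sanity check after installing (standard Filebeat and systemd commands):

filebeat version            # should report 7.12.1
systemctl status filebeat   # the unit exists; it is restarted after configuration below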

2. Filebeat -> Redis -> Logstash -> ES

2.1 Filebeat configuration: ship Nginx log files to Redis

root@web1:/usr/local/src# grep -v "#" /etc/filebeat/filebeat.yml | grep -v "^$"
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /var/log/syslog
  fields:
    type: syslog
- type: log
  enabled: true
  paths:
    - /apps/nginx/logs/error.log
  fields:
    service: nginx-errorlog
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
setup.template.settings:
  index.number_of_shards: 1
setup.kibana:
#output.elasticsearch:
#  hosts: ["10.0.0.31:9200"]

output.redis:
  # NOTE: hosts and db here must match the Redis that the Logstash input in 2.2
  # reads from (it uses 10.0.0.34, db 1); align one side or nothing is consumed
  hosts: ["10.0.0.35"]
  password: "123456"
  key: "lck-nginx"
  db: 0
  timeout: 5
  
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~

# Restart the service
systemctl restart filebeat
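To confirm that events are reaching Redis, check the length of the configured list key (a quick check with the standard redis-cli, using the host/db/key values from the config above):

redis-cli -h 10.0.0.35 -a 123456
select 0
LLEN lck-nginx    # should grow as Filebeat ships events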

2.2 Logstash configuration: pull from Redis into Elasticsearch

root@ubuntu1804:~# vim /etc/logstash/conf.d/redis-to-es.conf
input {
  redis {
    data_type => "list"
    key => "lck-nginx"
    host => "10.0.0.34"
    port => "6379"
    db => "1"
    password => "123456"
    threads => "4"
  }
}

output {
  if [fields][service] == "nginx-errorlog" {
    elasticsearch {
      hosts => ["10.0.0.31:9200"]
      index => "filebeat-nginx-errorlog-%{+YYYY.MM.dd}"
    }
  }
  if [fields][type] == "syslog" {
    elasticsearch {
      hosts => ["10.0.0.31:9200"]
      index => "filebeat-nginx-syslog-%{+YYYY.MM.dd}"
    }
  }
}

# Restart the service
systemctl restart logstash
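Once Logstash is running, the daily indices should show up in Elasticsearch (standard cat-indices API):

curl 'http://10.0.0.31:9200/_cat/indices/filebeat-nginx-*?v'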

3. Filebeat -> Kafka -> Logstash -> ES

3.1 Filebeat configuration: ship Nginx log files to Kafka

vim /etc/filebeat/filebeat.yml
- type: log
  enabled: true
  paths:
    - /var/log/syslog
  fields:
    type: syslog

- type: log
  enabled: true
  paths:
    - /apps/nginx/logs/error.log
  fields:
    service: nginx-errorlog

- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  fields:
    service: nginx-accesslog

output.kafka:
  hosts: ["10.0.0.40:9092","10.0.0.41:9092","10.0.0.42:9092"]
  # the inputs above set fields.service rather than fields.log_topic, so the
  # topic is derived from fields.service to match the topics consumed in 3.2;
  # the syslog input sets only fields.type and would need its own topic rule
  topic: 'lck-%{[fields.service]}'
  partition.round_robin:
    reachable_only: false
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000

# Restart the service
systemctl restart filebeat

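To confirm the topics were created on the brokers (kafka-topics.sh ships with Kafka; the path below is an assumption, adjust it to your install):

/usr/local/kafka/bin/kafka-topics.sh --bootstrap-server 10.0.0.40:9092 --list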

3.2 Logstash configuration: pull from Kafka into Elasticsearch

root@ubuntu1804:~# vim /etc/logstash/conf.d/kafka-to-es.conf
input {
  kafka {
    bootstrap_servers => "10.0.0.40:9092,10.0.0.41:9092,10.0.0.42:9092"
    topics => ["lck-nginx-accesslog","lck-nginx-errorlog"]
    codec => "json"
  }
}

output {
  if [fields][service] == "nginx-errorlog" {
    elasticsearch {
      hosts => ["10.0.0.31:9200"]
      index => "filebeat-nginx-errorlog-%{+YYYY.MM.dd}"
    }
  }
  if [fields][type] == "syslog" {
    elasticsearch {
      hosts => ["10.0.0.31:9200"]
      index => "filebeat-nginx-syslog-%{+YYYY.MM.dd}"
    }
  }
}
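As in the other sections, test the syntax and restart Logstash:

# Check that the configuration syntax is correct
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/kafka-to-es.conf -t

# Restart the service
systemctl restart logstash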

4. Filebeat -> Logstash -> Redis -> Logstash -> ES

Because Filebeat itself cannot process JSON-formatted data, we use Logstash to handle the JSON-formatted data.

4.1 Filebeat configuration: ship Nginx log files to Logstash

vim /etc/filebeat/filebeat.yml
- type: log
  enabled: true
  paths:
    - /var/log/syslog
  fields:
    type: syslog

- type: log
  enabled: true
  paths:
    - /apps/nginx/logs/error.log
  fields:
    service: nginx-errorlog

- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  fields:
    service: nginx-accesslog

output.logstash:
  hosts: ["10.0.0.36:5044"]

# Restart the service
systemctl restart filebeat
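Filebeat can also validate the configuration file and the connection to the Logstash output (standard `filebeat test` subcommands in 7.x):

filebeat test config
filebeat test output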

4.2 Logstash configuration: forward the data received from Filebeat to Redis

vim /etc/logstash/conf.d/filebeat-to-redis.conf
input {
  beats {
    port => 5044
    codec => "json"
  }
}

output {
  if [fields][type] == "syslog" {
    redis {
      data_type => "list"
      key => "lck-syslog"
      host => "10.0.0.34"
      port => "6379"
      db => "1"
      password => "123456"
    }
  }
  if [fields][service] == "nginx-errorlog" {
    redis {
      data_type => "list"
      key => "lck-nginx-errorlog"
      host => "10.0.0.34"
      port => "6379"
      db => "1"
      password => "123456"
    }
  }
  if [fields][service] == "nginx-accesslog" {
    redis {
      data_type => "list"
      key => "lck-nginx-accesslog"
      host => "10.0.0.34"
      port => "6379"
      db => "1"
      password => "123456"
    }
  }
}

# Check that the configuration syntax is correct
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/filebeat-to-redis.conf -t

# Restart the service
systemctl restart logstash

# Inspect the data in Redis
redis-cli
select 1
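Inside redis-cli (after select 1), the list lengths should grow as events arrive; the key names come from the output configuration above:

LLEN lck-syslog
LLEN lck-nginx-accesslog
LLEN lck-nginx-errorlog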

4.3 Logstash configuration: move the Redis data into Elasticsearch

root@ubuntu1804:~# vim /etc/logstash/conf.d/redis-to-es.conf
input {
  redis {
    data_type => "list"
    key => "lck-syslog"
    host => "10.0.0.34"
    port => "6379"
    db => "1"
    password => "123456"
    threads => "4"
  }
  redis {
    data_type => "list"
    key => "lck-nginx-accesslog"
    host => "10.0.0.34"
    port => "6379"
    db => "1"
    password => "123456"
    threads => "4"
  }
  redis {
    data_type => "list"
    key => "lck-nginx-errorlog"
    host => "10.0.0.34"
    port => "6379"
    db => "1"
    password => "123456"
    threads => "4"
  }
}

output {
  if [fields][type] == "syslog" {
    elasticsearch {
      hosts => ["10.0.0.31:9200"]
      index => "logstash-nginx-syslog-%{+YYYY.MM.dd}"
    }
  }
  if [fields][service] == "nginx-accesslog" {
    elasticsearch {
      hosts => ["10.0.0.31:9200"]
      index => "logstash-nginx-accesslog-%{+YYYY.MM.dd}"
    }
  }
  if [fields][service] == "nginx-errorlog" {
    elasticsearch {
      hosts => ["10.0.0.31:9200"]
      index => "logstash-nginx-errorlog-%{+YYYY.MM.dd}"
    }
  }
}

# Note: an index name that begins with "logstash" picks up the default Logstash index template, so client-IP geolocation can be shown on the map in Elasticsearch/Kibana

# Check that the configuration syntax is correct
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/redis-to-es.conf -t

# Restart the service
systemctl restart logstash
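To confirm the indices from this pipeline were created:

curl 'http://10.0.0.31:9200/_cat/indices/logstash-nginx-*?v'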

5. Filebeat -> Logstash -> Kafka -> Logstash -> ES


5.1 Filebeat configuration: ship Nginx log files to Logstash

vim /etc/filebeat/filebeat.yml
- type: log
  enabled: true
  paths:
    - /var/log/syslog
  fields:
    type: syslog

- type: log
  enabled: true
  paths:
    - /apps/nginx/logs/error.log
  fields:
    service: nginx-errorlog

- type: log
  enabled: true
  paths:
    - /var/log/nginx/access.log
  fields:
    service: nginx-accesslog

output.logstash:
  hosts: ["10.0.0.36:5044","10.0.0.36:5045"]
  enabled: true
  worker: 1
  compression_level: 3
  loadbalance: true

# Restart the service
systemctl restart filebeat

# Verify that the service started
systemctl status filebeat.service
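Note that filebeat-to-kafka.conf in 5.2 below only listens on port 5044; to actually balance across both endpoints listed above, a second beats input is needed. A minimal sketch, assuming it is added to the same pipeline file:

input {
  beats {
    port => 5045
    codec => "json"
  }
}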

5.2 Logstash configuration: forward the data received from Filebeat to Kafka

vim /etc/logstash/conf.d/filebeat-to-kafka.conf
input {
  beats {
    port => 5044
    codec => "json"
  }
}

output {
  if [fields][type] == "syslog" {
    kafka {
      bootstrap_servers => "10.0.0.40:9092,10.0.0.41:9092,10.0.0.42:9092"
      topic_id => "lck-50-syslog"
      codec => "json"
    }
  }
  if [fields][service] == "nginx-errorlog" {
    kafka {
      bootstrap_servers => "10.0.0.40:9092,10.0.0.41:9092,10.0.0.42:9092"
      topic_id => "lck-50-nginx-errorlog"
      codec => "json"
    }
  }
  if [fields][service] == "nginx-accesslog" {
    kafka {
      bootstrap_servers => "10.0.0.40:9092,10.0.0.41:9092,10.0.0.42:9092"
      topic_id => "lck-50-nginx-accesslog"
      codec => "json"
    }
  }
}

# Check that the configuration syntax is correct
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/filebeat-to-kafka.conf -t

# Restart the service
systemctl restart logstash

Check whether Kafka is receiving data:
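A quick way to read back from one of the topics (kafka-console-consumer.sh ships with Kafka; the path is an assumption, adjust it to your install):

/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 10.0.0.40:9092 --topic lck-50-nginx-accesslog --from-beginning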

5.3 Logstash configuration: move the Kafka data into Elasticsearch

root@ubuntu1804:~# vim /etc/logstash/conf.d/kafka-to-es.conf
input {
  kafka {
    bootstrap_servers => "10.0.0.40:9092,10.0.0.41:9092,10.0.0.42:9092"
    topics => ["lck-50-syslog","lck-50-nginx-errorlog","lck-50-nginx-accesslog"]
    codec => "json"
  }
}

output {
  if [fields][type] == "syslog" {
    elasticsearch {
      hosts => ["10.0.0.31:9200"]
      index => "kafka-0901-syslog-%{+YYYY.MM.dd}"
    }
  }
  if [fields][service] == "nginx-accesslog" {
    elasticsearch {
      hosts => ["10.0.0.31:9200"]
      index => "kafka-0901-nginx-accesslog-%{+YYYY.MM.dd}"
    }
  }
  if [fields][service] == "nginx-errorlog" {
    elasticsearch {
      hosts => ["10.0.0.31:9200"]
      index => "lkafka-0901-nginx-errorlog-%{+YYYY.MM.dd}"
    }
  }
}

# Note: an index name that begins with "logstash" picks up the default Logstash index template, so client-IP geolocation can be shown on the map in Elasticsearch/Kibana

# Check that the configuration syntax is correct
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/kafka-to-es.conf -t

# Restart the service
systemctl restart logstash

Verify that the data was written to Elasticsearch:
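Using the standard cat-indices API:

curl 'http://10.0.0.31:9200/_cat/indices/kafka-0901-*?v'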
