As the volume of logs on the company's servers keeps growing, we need to set up an ELK stack to collect and analyze them for later troubleshooting.
Download links:
elasticsearch:Elasticsearch 8.12.0 | Elastic
logstash:Logstash 8.12.0 | Elastic
kibana:Kibana 8.12.0 | Elastic
filebeat:Filebeat 8.12.0 | Elastic
Role | Host address | Description |
---|---|---|
es1 | 192.168.133.200 | elasticsearch cluster node |
es2 | 192.168.133.201 | elasticsearch cluster node |
es3 | 192.168.133.202 | elasticsearch cluster node |
kibana | 192.168.133.203 | kibana deployment host |
logstash, filebeat | 192.168.133.204 | logstash and filebeat host |
- vim /etc/security/limits.conf
- ---------------------------
- * soft nofile 65536
- * hard nofile 65536
- * soft nproc 32000
- * hard nproc 32000
- * hard memlock unlimited
- * soft memlock unlimited
- ---------------------------
-
- vim /etc/systemd/system.conf
- ---------------------------
- DefaultLimitNOFILE=65536
- DefaultLimitNPROC=32000
- DefaultLimitMEMLOCK=infinity
- ---------------------------
-
- vim /etc/sysctl.conf
- ---------------------------
- vm.max_map_count=655360
- fs.file-max = 655360
- ---------------------------
- sysctl -p
-
- vim /etc/hosts
- ---------------------------
- 192.168.133.200 es1
- 192.168.133.201 es2
- 192.168.133.202 es3
- ---------------------------
- vim /etc/profile
- --------append the following at the end------------
- export HISTSIZE=3000
- export HISTFILESIZE=10000
- export PATH
- export JAVA_HOME=/app/elasticsearch/jdk
- export PATH=$JAVA_HOME/bin:$PATH
- export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
- --------end of configuration------------------
- source /etc/profile
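Before installing Elasticsearch it is worth confirming, in a fresh login shell, that the limits above actually took effect; a quick check (the expected values are the ones configured above):
- sysctl vm.max_map_count    ## expect 655360
- sysctl fs.file-max         ## expect 655360
- ulimit -n                  ## expect 65536
- ulimit -u                  ## expect 32000
- ulimit -l                  ## expect unlimited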
Elasticsearch is deployed under /app/elasticsearch (this section: node1, 192.168.133.200).
- ## Cluster settings in the configuration file elasticsearch.yml
- vim /app/elasticsearch/config/elasticsearch.yml
- -----------------------------
- cluster.name: test
- node.name: node1
- network.host: 192.168.133.200
- bootstrap.memory_lock: true
- path.data: /data/elasticsearch/data
- path.logs: /data/elasticsearch/logs
- http.port: 9200
- transport.port: 9300
- discovery.seed_hosts: ["192.168.133.200:9300","192.168.133.201:9300","192.168.133.202:9300"]
- cluster.initial_master_nodes: ["node1", "node2","node3"]
- http.cors.enabled: true
- http.cors.allow-origin: '*'
- action.auto_create_index: true
- -----------------------------
-
- ## Create the es user with the home directory set to the program directory /app/elasticsearch
- groupadd es
- useradd -g es -p es -d /app/elasticsearch es
- chmod 755 -R /app/elasticsearch
- ## Copy the skeleton files into the home directory to avoid the (-bash-4.2$) prompt
- cp /etc/skel/.bash_logout /app/elasticsearch/
- cp /etc/skel/.bash_profile /app/elasticsearch/
- cp /etc/skel/.bashrc /app/elasticsearch/
- ## Create the data and log directories
- mkdir -p /data/elasticsearch/data
- mkdir -p /data/elasticsearch/logs
- mkdir -p /data/supervisord/
- chown -R es:es /data/elasticsearch/
- chown -R es:es /app/elasticsearch
- chmod 755 -R /app/elasticsearch
-
- ## systemd unit
- vim /etc/systemd/system/elasticsearch.service
- ---------------------
- [Unit]
- Description=elasticsearch
- After=network.target
- Wants=network.target
-
- [Service]
- Type=simple
- User=es
- Group=es
- LimitNOFILE=655350
- LimitNPROC=320000
- LimitMEMLOCK=infinity
- Restart=on-failure
- ExecStart=/app/elasticsearch/bin/elasticsearch
- ExecReload=/bin/kill -HUP $MAINPID
- PrivateTmp=true
- KillMode=mixed
-
- [Install]
- WantedBy=multi-user.target
- ---------------------
-
- ## Helper scripts
- mkdir /xyz
-
- cat >> /xyz/1-start.sh << EOF
- #!/bin/bash
- ## start script
- systemctl start elasticsearch.service
- EOF
-
- cat >> /xyz/2-ps.sh << EOF
- #!/bin/bash
- ## check status
- systemctl --no-pager status elasticsearch.service
- EOF
-
- cat >> /xyz/3-stop.sh << EOF
- #!/bin/bash
- ## stop script
- systemctl stop elasticsearch.service
- EOF
-
- cat >> /xyz/4-restart.sh << EOF
- #!/bin/bash
- ## restart script
- systemctl restart elasticsearch.service
- EOF
-
- cat >> /xyz/5-update.sh << EOF
- #!/bin/bash
- ## reload script
- systemctl daemon-reload
- EOF
-
- chmod -R 755 /xyz/
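After writing the unit file and helper scripts, make systemd re-read its unit files (this is exactly what 5-update.sh wraps); the service itself is enabled and started later, once security has been configured:
- systemctl daemon-reload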
Elasticsearch is deployed under /app/elasticsearch. The steps on node2 (192.168.133.201) are identical to node1 except for node.name and network.host in elasticsearch.yml.
- ## Cluster settings in the configuration file elasticsearch.yml
- vim /app/elasticsearch/config/elasticsearch.yml
- -----------------------------
- cluster.name: test
- node.name: node2
- network.host: 192.168.133.201
- bootstrap.memory_lock: true
- path.data: /data/elasticsearch/data
- path.logs: /data/elasticsearch/logs
- http.port: 9200
- transport.port: 9300
- discovery.seed_hosts: ["192.168.133.200:9300","192.168.133.201:9300","192.168.133.202:9300"]
- cluster.initial_master_nodes: ["node1", "node2","node3"]
- http.cors.enabled: true
- http.cors.allow-origin: '*'
- action.auto_create_index: true
- -----------------------------
-
- ## Create the es user with the home directory set to the program directory /app/elasticsearch
- groupadd es
- useradd -g es -p es -d /app/elasticsearch es
- chmod 755 -R /app/elasticsearch
- ## Copy the skeleton files into the home directory to avoid the (-bash-4.2$) prompt
- cp /etc/skel/.bash_logout /app/elasticsearch/
- cp /etc/skel/.bash_profile /app/elasticsearch/
- cp /etc/skel/.bashrc /app/elasticsearch/
- ## Create the data and log directories
- mkdir -p /data/elasticsearch/data
- mkdir -p /data/elasticsearch/logs
- mkdir -p /data/supervisord/
- chown -R es:es /data/elasticsearch/
- chown -R es:es /app/elasticsearch
- chmod 755 -R /app/elasticsearch
-
- ## systemd unit
- vim /etc/systemd/system/elasticsearch.service
- ---------------------
- [Unit]
- Description=elasticsearch
- After=network.target
- Wants=network.target
-
- [Service]
- Type=simple
- User=es
- Group=es
- LimitNOFILE=655350
- LimitNPROC=320000
- LimitMEMLOCK=infinity
- Restart=on-failure
- ExecStart=/app/elasticsearch/bin/elasticsearch
- ExecReload=/bin/kill -HUP $MAINPID
- PrivateTmp=true
- KillMode=mixed
-
- [Install]
- WantedBy=multi-user.target
- ---------------------
-
- ## Helper scripts
- mkdir /xyz
-
- cat >> /xyz/1-start.sh << EOF
- #!/bin/bash
- ## start script
- systemctl start elasticsearch.service
- EOF
-
- cat >> /xyz/2-ps.sh << EOF
- #!/bin/bash
- ## check status
- systemctl --no-pager status elasticsearch.service
- EOF
-
- cat >> /xyz/3-stop.sh << EOF
- #!/bin/bash
- ## stop script
- systemctl stop elasticsearch.service
- EOF
-
- cat >> /xyz/4-restart.sh << EOF
- #!/bin/bash
- ## restart script
- systemctl restart elasticsearch.service
- EOF
-
- cat >> /xyz/5-update.sh << EOF
- #!/bin/bash
- ## reload script
- systemctl daemon-reload
- EOF
-
- chmod -R 755 /xyz/
Elasticsearch is deployed under /app/elasticsearch. The steps on node3 (192.168.133.202) are identical to node1 except for node.name and network.host in elasticsearch.yml.
- ## Cluster settings in the configuration file elasticsearch.yml
- vim /app/elasticsearch/config/elasticsearch.yml
- -----------------------------
- cluster.name: test
- node.name: node3
- network.host: 192.168.133.202
- bootstrap.memory_lock: true
- path.data: /data/elasticsearch/data
- path.logs: /data/elasticsearch/logs
- http.port: 9200
- transport.port: 9300
- discovery.seed_hosts: ["192.168.133.200:9300","192.168.133.201:9300","192.168.133.202:9300"]
- cluster.initial_master_nodes: ["node1", "node2","node3"]
- http.cors.enabled: true
- http.cors.allow-origin: '*'
- action.auto_create_index: true
- -----------------------------
-
- ## Create the es user with the home directory set to the program directory /app/elasticsearch
- groupadd es
- useradd -g es -p es -d /app/elasticsearch es
- chmod 755 -R /app/elasticsearch
- ## Copy the skeleton files into the home directory to avoid the (-bash-4.2$) prompt
- cp /etc/skel/.bash_logout /app/elasticsearch/
- cp /etc/skel/.bash_profile /app/elasticsearch/
- cp /etc/skel/.bashrc /app/elasticsearch/
- ## Create the data and log directories
- mkdir -p /data/elasticsearch/data
- mkdir -p /data/elasticsearch/logs
- mkdir -p /data/supervisord/
- chown -R es:es /data/elasticsearch/
- chown -R es:es /app/elasticsearch
- chmod 755 -R /app/elasticsearch
-
- ## systemd unit
- vim /etc/systemd/system/elasticsearch.service
- ---------------------
- [Unit]
- Description=elasticsearch
- After=network.target
- Wants=network.target
-
- [Service]
- Type=simple
- User=es
- Group=es
- LimitNOFILE=655350
- LimitNPROC=320000
- LimitMEMLOCK=infinity
- Restart=on-failure
- ExecStart=/app/elasticsearch/bin/elasticsearch
- ExecReload=/bin/kill -HUP $MAINPID
- PrivateTmp=true
- KillMode=mixed
-
- [Install]
- WantedBy=multi-user.target
- ---------------------
-
- ## Helper scripts
- mkdir /xyz
-
- cat >> /xyz/1-start.sh << EOF
- #!/bin/bash
- ## start script
- systemctl start elasticsearch.service
- EOF
-
- cat >> /xyz/2-ps.sh << EOF
- #!/bin/bash
- ## check status
- systemctl --no-pager status elasticsearch.service
- EOF
-
- cat >> /xyz/3-stop.sh << EOF
- #!/bin/bash
- ## stop script
- systemctl stop elasticsearch.service
- EOF
-
- cat >> /xyz/4-restart.sh << EOF
- #!/bin/bash
- ## restart script
- systemctl restart elasticsearch.service
- EOF
-
- cat >> /xyz/5-update.sh << EOF
- #!/bin/bash
- ## reload script
- systemctl daemon-reload
- EOF
-
- chmod -R 755 /xyz/
When Elasticsearch security is enabled on a cluster running under a production license, TLS/SSL must be used for transport communication and must be set up correctly. In addition, once security is enabled, all communication with the Elasticsearch cluster must be authenticated, including traffic from Kibana and/or application servers.
The simplest way for Kibana and/or application servers to authenticate to the Elasticsearch cluster is to embed a username and password in their configuration files or source code. In many organizations, however, storing usernames and passwords in such places is forbidden; in that case an alternative is to authenticate to the Elasticsearch cluster with a Public Key Infrastructure (PKI), i.e. client certificates.
Elasticsearch ships with a utility called elasticsearch-certutil that can generate self-signed certificates, which are used here to encrypt internal communication within the Elasticsearch cluster.
Note the following while setting up encryption:
Step 1: at Enter password for elastic-stack-ca.p12 :, set the password for the CA file. You can press Enter to leave it empty, but for security a password is set here; to keep things simple, all security-related passwords used in this cluster are the same.
Step 2: at Please enter the desired output file [elastic-certificates.p12]:, set the file name of the CA-signed certificate file; press Enter to keep the default name.
Step 3: at Enter password for elastic-certificates.p12 :, set the password for the certificate file; keeping it the same as the CA password is recommended.
- cd /app/elasticsearch/
- ## Generate the CA used to sign certificates for transport communication
- [es@es1 elasticsearch]$ bin/elasticsearch-certutil ca
- warning: ignoring JAVA_HOME=/app/elasticsearch/jdk; using bundled JDK
- This tool assists you in the generation of X.509 certificates and certificate
- signing requests for use with SSL/TLS in the Elastic stack.
-
- The 'ca' mode generates a new 'certificate authority'
- This will create a new X.509 certificate and private key that can be used
- to sign certificate when running in 'cert' mode.
-
- Use the 'ca-dn' option if you wish to configure the 'distinguished name'
- of the certificate authority
-
- By default the 'ca' mode produces a single PKCS#12 output file which holds:
- * The CA certificate
- * The CA's private key
-
- If you elect to generate PEM format certificates (the -pem option), then the output will
- be a zip file containing individual files for the CA certificate and private key
-
- Please enter the desired output file [elastic-stack-ca.p12]:
- Enter password for elastic-stack-ca.p12 :
- [es@es1 elasticsearch]$ ls
- bin config elastic-stack-ca.p12 jdk lib LICENSE.txt logs modules NOTICE.txt plugins README.asciidoc
-
- ## Use the generated CA certificate to create the node certificate
- [es@es1 elasticsearch]$ bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12
- warning: ignoring JAVA_HOME=/app/elasticsearch/jdk; using bundled JDK
- This tool assists you in the generation of X.509 certificates and certificate
- signing requests for use with SSL/TLS in the Elastic stack.
-
- The 'cert' mode generates X.509 certificate and private keys.
- * By default, this generates a single certificate and key for use
- on a single instance.
- * The '-multiple' option will prompt you to enter details for multiple
- instances and will generate a certificate and key for each one
- * The '-in' option allows for the certificate generation to be automated by describing
- the details of each instance in a YAML file
-
- * An instance is any piece of the Elastic Stack that requires an SSL certificate.
- Depending on your configuration, Elasticsearch, Logstash, Kibana, and Beats
- may all require a certificate and private key.
- * The minimum required value for each instance is a name. This can simply be the
- hostname, which will be used as the Common Name of the certificate. A full
- distinguished name may also be used.
- * A filename value may be required for each instance. This is necessary when the
- name would result in an invalid file or directory name. The name provided here
- is used as the directory name (within the zip) and the prefix for the key and
- certificate files. The filename is required if you are prompted and the name
- is not displayed in the prompt.
- * IP addresses and DNS names are optional. Multiple values can be specified as a
- comma separated string. If no IP addresses or DNS names are provided, you may
- disable hostname verification in your SSL configuration.
-
-
- * All certificates generated by this tool will be signed by a certificate authority (CA)
- unless the --self-signed command line option is specified.
- The tool can automatically generate a new CA for you, or you can provide your own with
- the --ca or --ca-cert command line options.
-
-
- By default the 'cert' mode produces a single PKCS#12 output file which holds:
- * The instance certificate
- * The private key for the instance certificate
- * The CA certificate
-
- If you specify any of the following options:
- * -pem (PEM formatted output)
- * -multiple (generate multiple certificates)
- * -in (generate certificates from an input file)
- then the output will be be a zip file containing individual certificate/key files
-
- Enter password for CA (elastic-stack-ca.p12) :
- Please enter the desired output file [elastic-certificates.p12]:
- Enter password for elastic-certificates.p12 :
-
- Certificates written to /app/elasticsearch/elastic-certificates.p12
-
- This file should be properly secured as it contains the private key for
- your instance.
- This file is a self contained file and can be copied and used 'as is'
- For each Elastic product that you wish to configure, you should copy
- this '.p12' file to the relevant configuration directory
- and then follow the SSL configuration instructions in the product guide.
-
- For client applications, you may only need to copy the CA certificate and
- configure the client to trust this certificate.
-
- ## This creates a file named elastic-certificates.p12 in the current directory; it is the TLS/SSL certificate used to encrypt communication.
- [es@es1 elasticsearch]$ ls -al
- total 2248
- drwxr-xr-x 9 es es 4096 Mar 28 16:21 .
- drwxr-xr-x 4 root root 4096 Mar 28 11:46 ..
- drwxr-xr-x 2 es es 4096 Jan 11 18:11 bin
- drwxr-xr-x 3 es es 4096 Mar 28 16:00 config
- -rw------- 1 es es 3596 Mar 28 16:21 elastic-certificates.p12
- -rw------- 1 es es 2672 Mar 28 16:20 elastic-stack-ca.p12
- drwxr-xr-x 8 es es 4096 Jan 11 18:11 jdk
- drwxr-xr-x 5 es es 4096 Jan 11 18:11 lib
- -rwxr-xr-x 1 es es 3860 Jan 11 18:04 LICENSE.txt
- drwxr-xr-x 2 es es 4096 Mar 28 15:59 logs
- drwxr-xr-x 81 es es 4096 Jan 11 18:11 modules
- -rwxr-xr-x 1 es es 2239562 Jan 11 18:07 NOTICE.txt
- drwxr-xr-x 2 es es 4096 Jan 11 18:06 plugins
- -rwxr-xr-x 1 es es 8426 Jan 11 18:04 README.asciidoc
-
- ## Create a directory for the certificates
- [es@es1 elasticsearch]$ pwd
- /app/elasticsearch
- [es@es1 elasticsearch]$ mkdir config/certs
-
- ## Move the certificates into that directory
- [es@es1 elasticsearch]$ mv elastic-* config/certs/
- [es@es1 elasticsearch]$ ls -al /app/elasticsearch/config/certs/
- total 16
- drwxr-xr-x 2 es es 4096 Mar 28 16:32 .
- drwxr-xr-x 4 es es 4096 Mar 28 16:31 ..
- -rw------- 1 es es 3596 Mar 28 16:21 elastic-certificates.p12
- -rw------- 1 es es 2672 Mar 28 16:20 elastic-stack-ca.p12
-
- ## Edit the elasticsearch configuration file (all nodes)
- vim /app/elasticsearch/config/elasticsearch.yml
- ---------------------
- xpack.security.enabled: true
- xpack.security.transport.ssl.enabled: true
- xpack.security.transport.ssl.verification_mode: certificate
- xpack.security.transport.ssl.keystore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.transport.ssl.truststore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- ---------------------
- ## Copy /app/elasticsearch/config/certs/elastic-certificates.p12 and /app/elasticsearch/config/certs/elastic-stack-ca.p12 to the other nodes, and apply the same changes to their elasticsearch configuration files
For HTTP communication the Elasticsearch nodes only act as servers, so a server certificate is sufficient; HTTP TLS/SSL does not need client authentication enabled.
The certificate used to encrypt HTTP communication can be completely independent of the certificate used for transport communication.
For convenience, the same certificate can be used for both transport and HTTP communication.
- ## Reuse the transport certificate for HTTP communication; edit the elasticsearch configuration file
- vim /app/elasticsearch/config/elasticsearch.yml
- ---------------------
- xpack.security.http.ssl.enabled: true
- xpack.security.http.ssl.keystore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.truststore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.verification_mode: certificate
- xpack.security.http.ssl.client_authentication: optional
- ---------------------
In this deployment all passwords are identical, namely the password used when encrypting the CA file.
- ## Run on every node; if a password was set when the certificates were created, enter it when prompted
- [es@es1 ~]$ ./bin/elasticsearch-keystore add xpack.security.transport.ssl.keystore.secure_password
- [es@es1 ~]$ ./bin/elasticsearch-keystore add xpack.security.transport.ssl.truststore.secure_password
- [es@es1 ~]$ ./bin/elasticsearch-keystore add xpack.security.http.ssl.keystore.secure_password
- [es@es1 ~]$ ./bin/elasticsearch-keystore add xpack.security.http.ssl.truststore.secure_password
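To double-check that the four secure passwords were added on a node, the keystore entries can be listed (only the setting names are printed, never the values):
- [es@es1 ~]$ ./bin/elasticsearch-keystore list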
Once the keystore has been configured on all nodes, the Elasticsearch cluster can be started.
Setting the built-in user passwords requires the cluster to be running; start it with the prepared script: sh /xyz/1-start.sh
(systemctl start elasticsearch.service works just as well.)
- ## Set the passwords automatically (run from /app/elasticsearch)
- [root@es1 elasticsearch]# ./bin/elasticsearch-setup-passwords auto
- ## Set the passwords interactively (run from /app/elasticsearch)
- [root@es1 elasticsearch]# ./bin/elasticsearch-setup-passwords interactive
- ## Reset a user's password to a random value (run from /app/elasticsearch)
- [root@es1 elasticsearch]# ./bin/elasticsearch-reset-password -u elastic
- ## Reset a user's password to a value you choose, entered at a prompt (run from /app/elasticsearch)
- [root@es1 elasticsearch]# ./bin/elasticsearch-reset-password -u elastic -i
-
- ## Here the passwords are set interactively; all passwords are: CTZQxy601108
- #### Setting the passwords interactively
- [root@es1 elasticsearch]# ./bin/elasticsearch-setup-passwords interactive
- warning: ignoring JAVA_HOME=/app/elasticsearch/jdk; using bundled JDK
- ******************************************************************************
- Note: The 'elasticsearch-setup-passwords' tool has been deprecated. This command will be removed in a future release.
- ******************************************************************************
-
- Initiating the setup of passwords for reserved users elastic,apm_system,kibana,kibana_system,logstash_system,beats_system,remote_monitoring_user.
- You will be prompted to enter passwords as the process progresses.
- Please confirm that you would like to continue [y/N]y
-
-
- Enter password for [elastic]:
- Reenter password for [elastic]:
- Enter password for [apm_system]:
- Reenter password for [apm_system]:
- Enter password for [kibana_system]:
- Reenter password for [kibana_system]:
- Enter password for [logstash_system]:
- Reenter password for [logstash_system]:
- Enter password for [beats_system]:
- Reenter password for [beats_system]:
- Enter password for [remote_monitoring_user]:
- Reenter password for [remote_monitoring_user]:
- Changed password for user [apm_system]
- Changed password for user [kibana_system]
- Changed password for user [kibana]
- Changed password for user [logstash_system]
- Changed password for user [beats_system]
- Changed password for user [remote_monitoring_user]
- Changed password for user [elastic]
- [root@es1 elasticsearch]#
Enable the service at boot and start it: systemctl enable elasticsearch --now. The final elasticsearch.yml on node1:
- cluster.name: cttest
- node.name: node1
- network.host: 192.168.133.200
- bootstrap.memory_lock: true
- path.data: /data/elasticsearch/data
- path.logs: /data/elasticsearch/logs
- http.port: 9200
- transport.port: 9300
- discovery.seed_hosts: ["192.168.133.200:9300","192.168.133.201:9300","192.168.133.202:9300"]
- cluster.initial_master_nodes: ["node1", "node2","node3"]
- # Allow cross-origin HTTP requests; must be enabled when using kibana
- http.cors.enabled: true
- http.cors.allow-origin: '*'
- action.auto_create_index: true
- # With this setting the cluster shows as online in kibana monitoring; without it, it shows as offline
- xpack.monitoring.collection.enabled: true
- xpack.security.enabled: true
- xpack.security.transport.ssl.enabled: true
- xpack.security.transport.ssl.verification_mode: certificate
- xpack.security.transport.ssl.keystore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.transport.ssl.truststore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.enabled: true
- xpack.security.http.ssl.keystore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.truststore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.verification_mode: certificate
- xpack.security.http.ssl.client_authentication: optional
Enable the service at boot and start it: systemctl enable elasticsearch --now. The final elasticsearch.yml on node2:
- cluster.name: cttest
- node.name: node2
- network.host: 192.168.133.201
- bootstrap.memory_lock: true
- path.data: /data/elasticsearch/data
- path.logs: /data/elasticsearch/logs
- http.port: 9200
- transport.port: 9300
- discovery.seed_hosts: ["192.168.133.200:9300","192.168.133.201:9300","192.168.133.202:9300"]
- cluster.initial_master_nodes: ["node1", "node2","node3"]
- # Allow cross-origin HTTP requests; must be enabled when using kibana
- http.cors.enabled: true
- http.cors.allow-origin: '*'
- action.auto_create_index: true
- # With this setting the cluster shows as online in kibana monitoring; without it, it shows as offline
- xpack.monitoring.collection.enabled: true
- xpack.security.enabled: true
- xpack.security.transport.ssl.enabled: true
- xpack.security.transport.ssl.verification_mode: certificate
- xpack.security.transport.ssl.keystore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.transport.ssl.truststore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.enabled: true
- xpack.security.http.ssl.keystore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.truststore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.verification_mode: certificate
- xpack.security.http.ssl.client_authentication: optional
Enable the service at boot and start it: systemctl enable elasticsearch --now. The final elasticsearch.yml on node3:
- cluster.name: cttest
- node.name: node3
- network.host: 192.168.133.202
- bootstrap.memory_lock: true
- path.data: /data/elasticsearch/data
- path.logs: /data/elasticsearch/logs
- http.port: 9200
- transport.port: 9300
- discovery.seed_hosts: ["192.168.133.200:9300","192.168.133.201:9300","192.168.133.202:9300"]
- cluster.initial_master_nodes: ["node1", "node2","node3"]
- # Allow cross-origin HTTP requests; must be enabled when using kibana
- http.cors.enabled: true
- http.cors.allow-origin: '*'
- action.auto_create_index: true
- # With this setting the cluster shows as online in kibana monitoring; without it, it shows as offline
- xpack.monitoring.collection.enabled: true
- xpack.security.enabled: true
- xpack.security.transport.ssl.enabled: true
- xpack.security.transport.ssl.verification_mode: certificate
- xpack.security.transport.ssl.keystore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.transport.ssl.truststore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.enabled: true
- xpack.security.http.ssl.keystore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.truststore.path: /app/elasticsearch/config/certs/elastic-certificates.p12
- xpack.security.http.ssl.verification_mode: certificate
- xpack.security.http.ssl.client_authentication: optional
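With all three nodes configured and started, cluster formation can be verified from any machine; a quick check with curl, using the elastic password set above (-k skips verification of the self-signed certificate):
- curl -k -u elastic:CTZQxy601108 "https://192.168.133.200:9200/_cluster/health?pretty"
- ## a healthy 3-node cluster reports "number_of_nodes" : 3 and "status" : "green"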
- ## Upload the package and extract it into the target directory
- tar -zxvf kibana-8.12.0-linux-x86_64.tar.gz -C /app
- cd /app && mv kibana-8.12.0 kibana
-
- ## Create the kibana user, with the home directory set to /app/kibana
- groupadd kibana
- useradd -g kibana -p es -d /app/kibana kibana
-
- ## Copy the skeleton files into the home directory to avoid the (-bash-4.2$) prompt
- [root@kibana home]# cp /etc/skel/.bash_logout /app/kibana
- [root@kibana home]# cp /etc/skel/.bash_profile /app/kibana
- [root@kibana home]# cp /etc/skel/.bashrc /app/kibana
-
- ## Adjust ownership and permissions
- [root@kibana home]# chown -R kibana:kibana /app/kibana
- [root@kibana home]# chmod -R 755 /app/kibana
-
- ## Configure the systemd service
- vim /etc/systemd/system/kibana.service
- ------------------------------
- [Unit]
- Description=kibana service daemon
- After=network.target
- [Service]
- User=kibana
- Group=kibana
- LimitNOFILE=65536
- LimitNPROC=4096
- ExecStart=/app/kibana/bin/kibana
- ExecReload=/bin/kill -HUP $MAINPID
- KillMode=process
- Restart=on-failure
- RestartSec=10s
- [Install]
- WantedBy=multi-user.target
- ------------------------------
-
- ## Start/stop helper scripts
- mkdir /xyz
-
- cat >> /xyz/1-start.sh << EOF
- #!/bin/bash
- ## start script
- systemctl start kibana.service
- EOF
-
- cat >> /xyz/2-ps.sh << EOF
- #!/bin/bash
- ## check status
- systemctl --no-pager status kibana.service
- EOF
-
- cat >> /xyz/3-stop.sh << EOF
- #!/bin/bash
- ## stop script
- systemctl stop kibana.service
- EOF
-
- cat >> /xyz/4-restart.sh << EOF
- #!/bin/bash
- ## restart script
- systemctl restart kibana.service
- EOF
-
- cat >> /xyz/5-update.sh << EOF
- #!/bin/bash
- ## reload script
- systemctl daemon-reload
- EOF
-
- chmod -R 755 /xyz/
-
- ## Reload systemd units
- systemctl daemon-reload
Now that security is enabled on the Elasticsearch cluster, all communication with it must be authenticated. So if we plan to use Kibana to interact with the cluster, we must enable security there as well and configure Kibana to authenticate to the cluster over HTTPS as the kibana_system user.
Authentication setup:
- ## On the elasticsearch server, run the following in the directory holding the certificates (/app/elasticsearch/config/certs)
- openssl pkcs12 -in elastic-stack-ca.p12 -out newfile.crt.pem -clcerts -nokeys
- ## Upload the generated file (newfile.crt.pem) to the kibana server, into /app/kibana/config/certs
- ## Edit the kibana.yml configuration file
- vim /app/kibana/config/kibana.yml
- -----------------------------
- elasticsearch.hosts: ["https://192.168.133.200:9200","https://192.168.133.201:9200","https://192.168.133.202:9200"]
- elasticsearch.ssl.certificateAuthorities: ["/app/kibana/config/certs/newfile.crt.pem"]
- elasticsearch.ssl.verificationMode: none
- -----------------------------
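Note that the block above only covers TLS. Kibana also needs credentials to authenticate to the cluster; the configuration snippets in this article do not show them, so here is a minimal sketch assuming the kibana_system password set earlier is used (a service account token would work as well):
- elasticsearch.username: "kibana_system"
- elasticsearch.password: "CTZQxy601108"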
To expose Kibana behind NGINX under the /kibana path, add a location block on the proxy server:
- location /kibana {
- proxy_pass http://192.168.133.203:5601;
- proxy_redirect off;
- proxy_set_header Host $host;
- proxy_set_header X-Real-IP $remote_addr;
- proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
- }
The matching Kibana settings:
- server.basePath: "/kibana"
- server.rewriteBasePath: true
Kibana's Alerting module is mainly used for monitoring and alerting across the Elastic Stack. At a relatively low cost it lets you turn complex query conditions into monitors over the data produced by the different Elastic Stack products, and deliver matching alerts to users in whatever way is needed.
Prerequisites
1. The Elasticsearch cluster has HTTPS security enabled.
2. The xpack.encryptedSavedObjects.encryptionKey setting is added to the Kibana configuration file; its value must be at least 32 characters long.
Reference: https://blog.csdn.net/qq_33816243/article/details/132352607
- ## Switch to the kibana user
- su - kibana
- ## Kibana ships with a command-line tool, located in the bin directory, that generates the encryption keys; use it as follows:
- ./bin/kibana-encryption-keys generate
- -----------------------------
- Kibana is currently running with legacy OpenSSL providers enabled! For details and instructions on how to disable see https://www.elastic.co/guide/en/kibana/8.12/production.html#openssl-legacy-provider
- ## Kibana Encryption Key Generation Utility
-
- The 'generate' command guides you through the process of setting encryption keys for:
-
- xpack.encryptedSavedObjects.encryptionKey
- Used to encrypt stored objects such as dashboards and visualizations
- https://www.elastic.co/guide/en/kibana/current/xpack-security-secure-saved-objects.html#xpack-security-secure-saved-objects
-
- xpack.reporting.encryptionKey
- Used to encrypt saved reports
- https://www.elastic.co/guide/en/kibana/current/reporting-settings-kb.html#general-reporting-settings
-
- xpack.security.encryptionKey
- Used to encrypt session information
- https://www.elastic.co/guide/en/kibana/current/security-settings-kb.html#security-session-and-cookie-settings
-
-
- Already defined settings are ignored and can be regenerated using the --force flag. Check the documentation links for instructions on how to rotate encryption keys.
- Definitions should be set in the kibana.yml used configure Kibana.
-
- Settings:
- xpack.encryptedSavedObjects.encryptionKey: b87e67507f0282a3a0aae8d2cb569fb4
- xpack.reporting.encryptionKey: e66823775d4c0a099db7767870582717
- xpack.security.encryptionKey: b06948f600f733ec36cc61470cc4d451
- -----------------------------
In production you also need to configure server.publicBaseUrl; otherwise some features misbehave and a reminder is shown on every login.
- ## In production Kibana is accessed at http://192.168.133.204/kibana/
- The location directive on NGINX is:
- location /kibana    ## no trailing slash
- ## In the kibana configuration the URL must not end with a slash either:
- vim /app/kibana/config/kibana.yml
- -----------------------------
- server.publicBaseUrl: "http://192.168.133.204/kibana"
- -----------------------------
The complete kibana.yml used in this deployment:
- server.port: 5601
- server.host: "192.168.133.203"
-
- server.basePath: "/kibana"
- server.rewriteBasePath: true
-
- server.publicBaseUrl: "http://192.168.133.204/kibana"
-
- elasticsearch.hosts: ["https://192.168.133.200:9200","https://192.168.133.201:9200","https://192.168.133.202:9200"]
- i18n.locale: "zh-CN"
- elasticsearch.ssl.certificateAuthorities: ["/app/kibana/config/certs/newfile.crt.pem"]
- elasticsearch.ssl.verificationMode: none
- xpack.encryptedSavedObjects.encryptionKey: b87e67507f0282a3a0aae8d2cb569fb4
- xpack.reporting.encryptionKey: e66823775d4c0a099db7767870582717
- xpack.security.encryptionKey: b06948f600f733ec36cc61470cc4d451
Start kibana: sh /xyz/1-start.sh
Enable it at boot and start the service: systemctl enable kibana --now
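Once Kibana is running, a quick way to confirm it answers behind the base path is its status API; a hedged check, assuming the host, port and basePath configured above:
- curl -s "http://192.168.133.203:5601/kibana/api/status"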
- ## Upload the package to the server and extract it
- mkdir /app
- tar -zxvf logstash-8.12.0-linux-x86_64.tar.gz -C /app
- cd /app && mv logstash-8.12.0 logstash
- ## Configure the systemd service
- vim /etc/systemd/system/logstash.service
- -----------------------------
- [Unit]
- Description=Logstash
- Requires=network.service
- After=network.service
-
- [Service]
- LimitNOFILE=65536
- LimitMEMLOCK=infinity
- ExecStart=/app/logstash/bin/logstash -f /app/logstash/config/logstash.conf
- ExecReload=/bin/kill -HUP $MAINPID
- KillMode=mixed
- SuccessExitStatus=143
- Restart=on-failure
-
- [Install]
- WantedBy=multi-user.target
- -----------------------------
-
- ## Helper scripts
- mkdir /xyz
-
- cat >> /xyz/1-start.sh << EOF
- #!/bin/bash
- ## start script
- systemctl start logstash.service
- EOF
-
- cat >> /xyz/2-ps.sh << EOF
- #!/bin/bash
- ## check status
- systemctl --no-pager status logstash.service
- EOF
-
- cat >> /xyz/3-stop.sh << EOF
- #!/bin/bash
- ## stop script
- systemctl stop logstash.service
- EOF
-
- cat >> /xyz/4-restart.sh << EOF
- #!/bin/bash
- ## restart script
- systemctl restart logstash.service
- EOF
-
- cat >> /xyz/5-update.sh << EOF
- #!/bin/bash
- ## reload script
- systemctl daemon-reload
- EOF
-
- chmod -R 755 /xyz/
First, create an API key in the Kibana UI.
Then copy the API key into the logstash.conf configuration file (the api_key setting of the elasticsearch output below).
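If you prefer not to go through the Kibana UI, the same kind of API key can be created directly against the Elasticsearch security API; a sketch (the key name, role name and index pattern are examples, adjust them to your environment). Logstash expects the api_key value in the form "id:api_key", joined from the two fields of the response:
- curl -k -u elastic:CTZQxy601108 -H 'Content-Type: application/json' \
-   -X POST "https://192.168.133.200:9200/_security/api_key" -d '
- {
-   "name": "logstash-output",
-   "role_descriptors": {
-     "logstash_writer": {
-       "cluster": ["monitor", "manage_index_templates"],
-       "indices": [{ "names": ["test-nginx-*"], "privileges": ["create_index", "write", "auto_configure"] }]
-     }
-   }
- }'
- ## the response contains "id" and "api_key"; combine them as id:api_key for the logstash api_key setting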
- ## Configure logstash.conf
- vim /app/logstash/config/logstash.conf
- -----------------------------
- input {
- beats {
- port => 5044
- }
- }
-
- filter {
- if [message] =~ /200/ {
- drop {}
- }
-
- json {
- source => "message"
- remove_field => ["message","beat"]
- }
-
- date {
- match => ["timestamp","dd/MMM/yyyy:HH:mm:ss Z"]
- }
- }
-
- output {
- elasticsearch {
- hosts => ["https://192.168.133.200:9200","https://192.168.133.201:9200","https://192.168.133.202:9200"]
- index => "test-nginx-%{+YYYY.MM.dd}"
- ssl => true
- api_key => "BPmgoo4B7uBAvai7BEUG:gbQN4kI2TXaaH_IpSVFkIg"
- ilm_enabled => true
- ssl_certificate_verification => false
- }
- }
- -----------------------------
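Before starting the service, the pipeline syntax can be validated with Logstash's built-in config test:
- /app/logstash/bin/logstash -f /app/logstash/config/logstash.conf --config.test_and_exit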
The installation directory is /usr/local/filebeat.
- ## Install
- tar -zxvf filebeat-8.12.0-linux-x86_64.tar.gz -C /usr/local
- cd /usr/local
- mv filebeat-8.12.0-linux-x86_64 filebeat
- ## Configure the systemd service
-
- vim /etc/systemd/system/filebeat.service
- -----------------------------
- [Unit]
- Description=Filebeat
- Wants=network.service
- After=network.service
-
- [Service]
- ExecStart=/usr/local/filebeat/filebeat -e -c /usr/local/filebeat/filebeat.yml
- Restart=always
- ExecReload=/bin/kill -HUP $MAINPID
- KillMode=mixed
- Restart=on-failure
-
- [Install]
- WantedBy=multi-user.target
- -----------------------------
The logs to collect are under /data/logs; to pick up every log file in that directory, use the path /data/logs/*.log.
The collected data is shipped to logstash at 192.168.133.204:5044.
- vim /usr/local/filebeat/filebeat.yml
- -----------------------------
- filebeat.inputs:
- - type: log
-   enabled: true
-   paths:
-     - /data/logs/*.log
-   tags: ["nginx"]
-   json.keys_under_root: true
-   json.add_error_key: true
-   fields:
-     log_topic: "nginx_log"
- output.logstash:
-   hosts: ["192.168.133.204:5044"]
- -----------------------------
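Filebeat has built-in checks for the configuration file and for the connection to the configured output; a quick verification before enabling the service:
- cd /usr/local/filebeat
- ./filebeat test config -c /usr/local/filebeat/filebeat.yml
- ./filebeat test output -c /usr/local/filebeat/filebeat.yml
- ## then enable and start it
- systemctl daemon-reload
- systemctl enable filebeat --now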