docker pull elasticsearch:7.6.1
docker pull kibana:7.6.1
docker pull logstash:7.6.1
docker pull mobz/elasticsearch-head
version: '3'
services:
  es:
    image: elasticsearch:7.6.1
    container_name: es
    privileged: true
    ports:
      - "9200:9200"
    restart: always
    networks:
      - front-ms
    environment:
      - discovery.type=single-node
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    volumes:
      - $PWD/data/es:/usr/share/elasticsearch/data
      - $PWD/logs/es:/usr/share/elasticsearch/logs
      - $PWD/conf/es/plugins:/usr/share/elasticsearch/plugins
  es-head:
    image: mobz/elasticsearch-head:5
    container_name: es-head
    ports:
      - "9100:9100"
  kibana:
    image: kibana:7.6.1
    container_name: kibana
    ports:
      - "5601:5601"
    depends_on:
      - es
    networks:
      - front-ms
    privileged: true
    volumes:
      - $PWD/conf/es/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml
  logstash:
    image: logstash:7.6.1
    container_name: logstash
    volumes:
      - $PWD/conf/es/logstash/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    depends_on:
      - es
    ports:
      - 4560:4560
    networks:
      - front-ms
networks:
  front-ms:
    driver: bridge
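With the file saved as docker-compose.yml, the stack can be brought up and checked with a few commands (a minimal sketch; localhost assumes you are on the Docker host):

# start all four containers in the background
docker-compose up -d
# confirm they are running
docker ps --format '{{.Names}}\t{{.Status}}'
# Elasticsearch should answer on 9200 once it has finished booting
curl http://localhost:9200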
If mounting the data or logs directories prevents Elasticsearch from starting, add the following environment variable to the es service:
- "TAKE_FILE_OWNERSHIP=true"
Enter the elasticsearch container; there is a startup script in the /usr/local/bin directory.
Reading this script shows that
once TAKE_FILE_OWNERSHIP is set to true, it runs chown -R 1000:0 /usr/share/elasticsearch/{data,logs}.
In that command, UID 1000 is the elasticsearch user and GID 0 is the root group; without the change the mounted directories stay owned by root, which in some cases the elasticsearch process cannot write to.
Before adding the parameter:
After adding the parameter:
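An alternative that avoids TAKE_FILE_OWNERSHIP is to pre-create the mounted directories on the host and hand them to UID 1000 yourself (a sketch; the paths follow the compose file above, run from the compose directory as root):

# create the host directories that the compose file mounts
mkdir -p data/es logs/es conf/es/plugins
# UID 1000 is the elasticsearch user inside the official image
chown -R 1000:0 data/es logs/es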
docker exec -it es /bin/bash
To let elasticsearch-head talk to Elasticsearch, append the following CORS settings to config/elasticsearch.yml inside the container:
http.cors.enabled: true
http.cors.allow-origin: "*"
Then exit the container and restart Elasticsearch:
docker-compose restart es
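You can confirm the CORS headers without opening the es-head UI (port mapping as in the compose file):

# the response headers should include Access-Control-Allow-Origin: *
curl -s -D - -o /dev/null -H 'Origin: http://localhost:9100' http://localhost:9200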
#
# ** THIS IS AN AUTO-GENERATED FILE **
#
# Default Kibana configuration for docker target
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://es:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true
# display the Kibana UI in Chinese
i18n.locale: zh-CN
input {
  tcp {
    mode => "server"
    host => "0.0.0.0"
    port => 4560
    codec => json_lines
  }
}
filter {
  # capture a numeric timestamp such as 2020-03-01 12:00:00,123 from the log line
  grok {
    match => ["message", "(?<customer_time>%{YEAR}-%{MONTHNUM}-%{MONTHDAY}\s+%{TIME})"]
  }
  date {
    match => ["customer_time", "yyyy-MM-dd HH:mm:ss,SSS", "ISO8601"]
    locale => "en"
    target => ["@timestamp"]
    timezone => "Asia/Shanghai"
  }
  if [level] !~ "(ERROR|INFO|DEBUG)" {
    drop {}
  }
  json {
    source => "message"
  }
}
output {
  if [level] == "INFO" {
    elasticsearch {
      hosts => "ip:9200"    # replace ip with your Elasticsearch host
      index => "infolog-%{+YYYY.MM.dd}"
    }
  } else if [level] == "ERROR" {
    elasticsearch {
      hosts => "ip:9200"
      index => "errorlog-%{+YYYY.MM.dd}"
    }
  } else if [level] == "DEBUG" {
    elasticsearch {
      hosts => "ip:9200"
      index => "debuglog-%{+YYYY.MM.dd}"
    }
  }
  stdout {
    codec => rubydebug
  }
}
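With Logstash up, the pipeline can be smoke-tested by pushing a fake json_lines event into port 4560 (a sketch; the field values are invented and nc must be available):

# send one JSON log line to the Logstash TCP input
echo '{"level":"INFO","message":"2020-03-01 12:00:00,123 hello from nc"}' | nc -w 1 localhost 4560
# a daily index should appear shortly afterwards
curl 'http://localhost:9200/_cat/indices?v' | grep infolog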
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <!-- Define variables: name is the variable name, value its value; reference them as ${name} -->
    <property name="DIR" value="log"/>
    <property name="LOG_HOME" value="logs"/>
    <property name="INFO_NAME" value="info_log"/>
    <property name="INFO_LOG_NAME" value="info"/>
    <property name="ERROR_NAME" value="error_log"/>
    <property name="ERROR_LOG_NAME" value="error"/>
    <property name="DEBUG_NAME" value="debug_log"/>
    <property name="DEBUG_LOG_NAME" value="debug"/>
    <property name="MDC_LOG_PATTERN"
              value="%red(%d{yyyy-MM-dd'T'HH:mm:ss.SSS}) %green(%p api %t) %blue(%logger{50}) %yellow([line:%L %msg]%n)"/>

    <!-- Runtime log appender, rolled by date -->
    <appender name="info" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the active log file -->
        <file>${LOG_HOME}/${INFO_NAME}/${INFO_LOG_NAME}.log</file>
        <!-- Rolling policy: by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/${INFO_NAME}/${INFO_LOG_NAME}-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <!-- Besides rolling by date, cap each file at 50MB; when a file exceeds 50MB a new one
                 is started with an index beginning at 0, e.g. bizlog-biz-20181219.0.log -->
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>50MB</maxFileSize>
                <!-- keep for 3 days -->
                <!--<MaxHistory>3</MaxHistory>-->
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <!-- Append to the log file -->
        <append>true</append>
        <!-- Log line format -->
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy/MM/dd' 'HH:mm:ss.SSS} %X{req.requestId}[line:%L %msg] %n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <!-- This file records INFO level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>info</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <appender name="error" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/${ERROR_NAME}/${ERROR_LOG_NAME}.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/${ERROR_NAME}/${ERROR_LOG_NAME}-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>50MB</maxFileSize>
                <!--<MaxHistory>3</MaxHistory>-->
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <append>true</append>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy/MM/dd' 'HH:mm:ss.SSS} %X{req.requestId}[line:%L %msg] %n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <!-- This file records ERROR level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>error</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <appender name="debug" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${LOG_HOME}/${DEBUG_NAME}/${DEBUG_LOG_NAME}.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${LOG_HOME}/${DEBUG_NAME}/${DEBUG_LOG_NAME}-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>50MB</maxFileSize>
                <!--<MaxHistory>3</MaxHistory>-->
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <append>true</append>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy/MM/dd' 'HH:mm:ss.SSS} %X{req.requestId}[line:%L %msg] %n</pattern>
            <charset>utf-8</charset>
        </encoder>
        <!-- This file records DEBUG level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>debug</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- ConsoleAppender prints to the console -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <!-- encoder defaults to PatternLayoutEncoder -->
        <encoder>
            <pattern>${MDC_LOG_PATTERN}</pattern>
            <charset>utf-8</charset>
        </encoder>
        <!-- For development: set the lowest level; the console shows everything at or above it -->
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>all</level>
        </filter>
    </appender>

    <!-- Ships log events to Logstash over TCP -->
    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <destination>ip:4560</destination>
        <encoder charset="UTF-8" class="net.logstash.logback.encoder.LogstashEncoder"/>
    </appender>

    <!-- Root logger; it only has a level attribute because it is already named "root" -->
    <root level="info">
        <appender-ref ref="info"/>
        <appender-ref ref="STDOUT"/>
        <appender-ref ref="error"/>
        <appender-ref ref="debug"/>
        <appender-ref ref="LOGSTASH"/>
    </root>
</configuration>
logging:
  level:
    root: INFO
  config: classpath:logback-spring.xml
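Note that LogstashTcpSocketAppender and LogstashEncoder come from the net.logstash.logback:logstash-logback-encoder artifact, which must be on the application's classpath alongside the usual Spring Boot logging starter.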
Open the Data Browser tab in elasticsearch-head and the documents fail to load. With Elasticsearch 7.x this is usually because es-head sends the wrong Content-Type; in the es-head front-end code (_site/vendor.js), change every occurrence of
contentType: "application/x-www-form-urlencoded"
to
contentType: "application/json;charset=UTF-8"
Return to the Kibana page.
try {
    String s = null;
    // deliberately dereference null to produce a NullPointerException
    System.out.println(s.substring(0, 1));
} catch (Exception e) {
    // logger is an org.slf4j.Logger; passing the Throwable ships the stack trace to Logstash
    logger.error("error", e);
}
If exceptions are only printed with e.printStackTrace(), Logstash cannot capture them; replace such calls with logger.error("error", e) from the SLF4J API.
Click the Discover tab to view the data.
Logs of all three levels are now coming in.
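The same check can be made against Elasticsearch directly:

# one index per level and per day should exist
curl 'http://localhost:9200/_cat/indices?v' | grep -E 'infolog|errorlog|debuglog'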
docker exec -it es /bin/bash
Inside the container, work from the directory one level above bin (/usr/share/elasticsearch).
Run the following commands in order.
Command 1:
bin/elasticsearch-certutil ca
The command fails because there is not enough memory.
Increase the heap size in docker-compose.yml: "ES_JAVA_OPTS=-Xms2g -Xmx2g"
Exit the container and restart es.
Enter the container again and check jvm.options under the config directory: cat jvm.options
The new heap setting has taken effect, so rerun command 1 to create the CA certificate.
If it does not take effect, edit the jvm.options file inside the container directly and then restart.
If the host itself is short on memory, create a swap partition, as sketched below.
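A swap file is usually the quickest way to add swap (a minimal sketch; the 2 GB size and /swapfile path are arbitrary choices, run as root):

# create and enable a 2 GB swap file
fallocate -l 2G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
# persist it across reboots
echo '/swapfile none swap sw 0 0' >> /etc/fstab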
When command 1 runs, it prompts with Please enter the ... and Enter password for ...;
enter nothing and just press Enter twice.
Run command 2:
bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12
As in the previous step, three prompts appear: Enter password..., Please enter the..., and Enter password for...
Enter nothing and press Enter for each.
On to the next step; this one is critical!
Running ll shows that the two newly created .p12 files are owned by root, so the owner must be changed.
Command 3:
chown elasticsearch:elasticsearch elastic-*.p12
Run ll again to check that the command took effect.
Command 4:
mv elastic-*.p12 config/
This moves both files into the config directory.
Now open elasticsearch.yml in the config directory and append the security settings:
vi config/elasticsearch.yml
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
Save and exit with :wq.
Exit the container and restart es.
From now on Elasticsearch requires a username and password.
Enter the container again to set the passwords.
Command 5:
bin/elasticsearch-setup-passwords interactive
Type y to continue.
Set and confirm a password for each of the listed built-in accounts.
Once that is done, setup is finished.
Open port 9200 in the browser;
log in with the username elastic and the password you just set.
The Elasticsearch side is now fully configured.
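A quick way to confirm that security is active (an unauthenticated request now gets a 401):

# without credentials: HTTP 401
curl -i http://localhost:9200
# with the elastic superuser: the normal cluster banner
curl -u elastic http://localhost:9200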
#
# ** THIS IS AN AUTO-GENERATED FILE **
#
# Default Kibana configuration for docker target
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://es:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true
elasticsearch.username: "kibana"
elasticsearch.password: "the-password-set-for-the-kibana-user"
i18n.locale: zh-CN
{:url=>"http://ip:9200/", :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::BadResponseCodeError, :error=>"Got response code '401' contacting Elasticsearch at URL 'http://ip:9200/'"}
Logstash can no longer connect to Elasticsearch and stops receiving logs.
Open the logstash.conf file
and fill in the username and password in every elasticsearch block:
output {
  if [level] == "INFO" {
    elasticsearch {
      hosts => "ip:9200"
      index => "infolog-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "your-password"
    }
  } else if [level] == "ERROR" {
    elasticsearch {
      hosts => "ip:9200"
      index => "errorlog-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "your-password"
    }
  } else if [level] == "DEBUG" {
    elasticsearch {
      hosts => "ip:9200"
      index => "debuglog-%{+YYYY.MM.dd}"
      user => "elastic"
      password => "your-password"
    }
  }
}
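Restart Logstash and watch its log; the 401 errors should disappear:

docker-compose restart logstash
docker logs --tail 50 logstash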
If elasticsearch-head should keep working against the secured cluster, also allow the Authorization header in elasticsearch.yml (es-head can then pass credentials as auth_user/auth_password query parameters in its URL):
http.cors.allow-headers: Authorization,X-Requested-With,Content-Length,Content-Type
To enable HTTPS on the REST layer, run the certificate tool in http mode:
elasticsearch-certutil http
When the tool asks for an existing certificate authority, point it at the CA generated earlier:
/usr/share/elasticsearch/config/elastic-stack-ca.p12
Generate a certificate per node? n
Enter the node hostnames, one per line; press Enter twice to continue.
Confirm the hostnames (y).
Enter the node IP addresses.
Key Name: www.baidu.com
Subject DN: CN=www,DC=baidu,DC=com
Key Size: 2048
Change any of these options? (N)
Set a password for the certificate; press Enter to skip.
Enter a filename for the generated zip archive; press Enter to accept the default.
Check the generated zip file:
unzip elasticsearch-ssl-http.zip
/elasticsearch
|_ README.txt
|_ http.p12
|_ sample-elasticsearch.yml
/kibana
|_ README.txt
|_ elasticsearch-ca.pem
|_ sample-kibana.yml
Copy http.p12 from the zip into the Elasticsearch config directory, then enable HTTPS in elasticsearch.yml:
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.keystore.path: http.p12
If you protected http.p12 with a password, store that password in the Elasticsearch keystore:
./bin/elasticsearch-keystore add xpack.security.http.ssl.keystore.secure_password
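You can list the stored secure settings to confirm, then restart Elasticsearch:

bin/elasticsearch-keystore list    # run inside the container
docker-compose restart es          # then restart from the host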
Copy the kibana directory that the zip produced out of the es container, then copy the CA into the kibana container (docker cp works on containers, so use the container names from the compose file):
docker cp es:/usr/share/elasticsearch/kibana /home/docker/ms-docker
docker cp /home/docker/ms-docker/kibana/elasticsearch-ca.pem kibana:/usr/share/kibana/config
Then edit kibana.yml:
# add this line
# if Kibana reports the file cannot be found after a restart, use the full path to elasticsearch-ca.pem
elasticsearch.ssl.certificateAuthorities: /config/elasticsearch-ca.pem
# add this line
# the default is full, which also verifies the hostname; if no hostname was set when
# generating the certificate, use certificate instead
elasticsearch.ssl.verificationMode: certificate
# change this line (the setting is elasticsearch.hosts, and Elasticsearch is now on https)
elasticsearch.hosts: ["https://es:9200"]
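Before restarting Kibana you can inspect the certificate Elasticsearch now serves, using the CA that was just copied out (host-side path follows the docker cp commands above):

# verification against the CA should succeed; the command exits after the handshake
openssl s_client -connect localhost:9200 -CAfile /home/docker/ms-docker/kibana/elasticsearch-ca.pem </dev/null | head -n 20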
To serve the Kibana UI itself over HTTPS, generate a server certificate inside the es container and copy the zip out:
elasticsearch-certutil cert --pem --name kibana-server --out certs.zip
docker cp es:/usr/share/elasticsearch/certs.zip /home/docker/ms-docker
./certs
├── ca
│ └── ca.crt
└── kibana-server
├── kibana-server.crt
└── kibana-server.key
docker cp /home/docker/ms-docker/kibana-server.crt kibana:/usr/share/kibana/config
docker cp /home/docker/ms-docker/kibana-server.key kibana:/usr/share/kibana/config
Finally, enable HTTPS for the Kibana server in kibana.yml:
server.ssl.enabled: true
server.ssl.certificate: /usr/share/kibana/config/kibana-server.crt
server.ssl.key: /usr/share/kibana/config/kibana-server.key
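After docker-compose restart kibana, the UI is served over https on 5601; a self-signed certificate will draw a browser warning. A quick smoke test from the shell:

# -k skips certificate verification, acceptable for a quick check of a self-signed cert
curl -k -I https://localhost:5601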
Logstash needs the CA as well; copy it into the logstash container:
docker cp /home/docker/ms-docker/kibana/elasticsearch-ca.pem logstash:/usr/share/logstash/config
Then add these two options to every elasticsearch block in logstash.conf:
ssl => true
cacert => "config/elasticsearch-ca.pem"
output {
  if [level] == "INFO" {
    elasticsearch {
      hosts => "ip:9200"
      index => "infolog-%{+YYYY.MM.dd}"
      ssl => true
      cacert => "config/elasticsearch-ca.pem"
    }
  } else if [level] == "ERROR" {
    elasticsearch {
      hosts => "ip:9200"
      index => "errorlog-%{+YYYY.MM.dd}"
      ssl => true
      cacert => "config/elasticsearch-ca.pem"
    }
  } else if [level] == "DEBUG" {
    elasticsearch {
      hosts => "ip:9200"
      index => "debuglog-%{+YYYY.MM.dd}"
      ssl => true
      cacert => "config/elasticsearch-ca.pem"
    }
  }
}
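Note that with security still enabled, each elasticsearch block also keeps the user and password options added earlier; ssl and cacert are added alongside them, not instead of them. Restart Logstash once more and the pipeline is back to shipping logs, now over HTTPS.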