
ELK Log Monitoring: Test Deployment

ELK startup test

Logstash is an open-source tool for collecting, parsing, and storing logs. Kibana 4 is the web interface for searching and viewing the logs that Logstash has indexed. Both tools are built on Elasticsearch.
● Logstash: the Logstash server component, which processes incoming logs.
● Elasticsearch: stores all the logs.
● Kibana 4: the web UI for searching and visualizing logs, reverse-proxied through nginx.
● Logstash Forwarder: installed on each server that will ship logs to Logstash; it acts as the log-forwarding agent and talks to the Logstash service over the lumberjack network protocol.
Note: logstash-forwarder is due to be replaced by Beats; watch for follow-up content, which will move to Logstash + Elasticsearch + Beats.
The ELK architecture is as follows (architecture diagram omitted):

Software used:
elasticsearch-1.7.2.tar.gz
kibana-4.1.2-linux-x64.tar.gz
logstash-1.5.6-1.noarch.rpm
logstash-forwarder-0.4.0-1.x86_64.rpm
Single-node deployment.
#OS
CentOS release 6.5 (Final)
#Base and JDK
groupadd elk
useradd -g elk elk
passwd elk
yum install vim lsof man wget ntpdate vixie-cron -y
crontab -e
*/1 * * * * /usr/sbin/ntpdate time.windows.com > /dev/null 2>&1
service crond restart
Disable SELinux and stop iptables:
sed -i "s#SELINUX=enforcing#SELINUX=disabled#" /etc/selinux/config
service iptables stop
reboot
tar -zxvf jdk-8u92-linux-x64.tar.gz -C /usr/local/
vim /etc/profile
export JAVA_HOME=/usr/local/jdk1.8.0_92
export JRE_HOME=/usr/local/jdk1.8.0_92/jre
export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$PATH
export CLASSPATH=$CLASSPATH:.:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
source /etc/profile
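A quick check that the JDK took effect:
java -version    #should report java version "1.8.0_92"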
#Elasticsearch
#(for a cluster, install Elasticsearch on the other servers as well, with the same cluster name and a different node name on each)
RPM install:
rpm --import http://packages.elastic.co/GPG-KEY-elasticsearch
wget -c https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.7.2.noarch.rpm
rpm -ivh elasticsearch-1.7.2.noarch.rpm
tar install:
wget -c https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.7.2.tar.gz
tar zxvf elasticsearch-1.7.2.tar.gz -C /usr/local/
cd /usr/local/elasticsearch-1.7.2/
mkdir -p /data/{db,logs}
vim config/elasticsearch.yml
#cluster.name: elasticsearch
#node.name: "es-node1"
#node.master: true
#node.data: true
path.data: /data/db
path.logs: /data/logs
network.host: 192.168.28.131
#Plugin installation
cd /usr/local/elasticsearch-1.7.2/
bin/plugin -install mobz/elasticsearch-head
#https://github.com/mobz/elasticsearch-head
bin/plugin -install lukas-vlcek/bigdesk
bin/plugin install lmenezes/elasticsearch-kopf
#complains that the Elasticsearch version is too low
The workaround is to download the plugin manually instead of going through the plugin install command:
cd /usr/local/elasticsearch-1.7.2/plugins
wget https://github.com/lmenezes/elasticsearch-kopf/archive/master.zip
unzip master.zip
mv elasticsearch-kopf-master kopf
The steps above are fully equivalent to the plugin install command.
cd /usr/local/
chown elk:elk elasticsearch-1.7.2/ -R
chown elk:elk /data/* -R
Install supervisord:
yum install supervisor -y
Append a program section for elasticsearch at the end of the config:
vim /etc/supervisord.conf
[program:elasticsearch]
directory = /usr/local/elasticsearch-1.7.2/
;command = su -c "/usr/local/elasticsearch-1.7.2/bin/elasticsearch" elk
command = /usr/local/elasticsearch-1.7.2/bin/elasticsearch
numprocs = 1
autostart = true
startsecs = 5
autorestart = true
startretries = 3
user = elk
;stdout_logfile_maxbytes = 200MB
;stdout_logfile_backups = 20
;stdout_logfile = /var/log/pvs_elasticsearch_stdout.log
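With that section in place, supervisord starts Elasticsearch and restarts it on failure. A typical sequence (a sketch, assuming the stock CentOS 6 supervisor package):
service supervisord start && chkconfig supervisord on
supervisorctl status elasticsearch   #should show RUNNING once startsecs has elapsed
supervisorctl restart elasticsearch  #after config changes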
#Kibana (mind the version pairing with Elasticsearch)
wget -c https://download.elastic.co/kibana/kibana/kibana-4.1.2-linux-x64.tar.gz
tar zxvf kibana-4.1.2-linux-x64.tar.gz -C /usr/local/
cd /usr/local/kibana-4.1.2-linux-x64
vim config/kibana.yml
port: 5601
host: "192.168.28.131"
elasticsearch_url: "http://192.168.28.131:9200"
./bin/kibana -l /var/log/kibana.log #start the service; since 4.0 Kibana runs as its own socket service
#cd /etc/init.d && curl -o kibana https://gist.githubusercontent.com/thisismitch/8b15ac909aed214ad04a/raw/fc5025c3fc499ad8262aff34ba7fde8c87ead7c0/kibana-4.x-init
#cd /etc/default && curl -o kibana https://gist.githubusercontent.com/thisismitch/8b15ac909aed214ad04a/raw/fc5025c3fc499ad8262aff34ba7fde8c87ead7c0/kibana-4.x-default
#adjust the settings in those files and add execute permission
Or create the init script directly:
cat > kibana <<'EOF'
#!/bin/bash
### BEGIN INIT INFO
# Provides: kibana
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Runs kibana daemon
# Description: Runs the kibana daemon as a non-root user
### END INIT INFO

# Process name
NAME=kibana
DESC="Kibana4"
PROG="/etc/init.d/kibana"

# Configure location of Kibana bin
KIBANA_BIN=/usr/local/kibana-4.1.2-linux-x64/bin

# PID Info
PID_FOLDER=/var/run/kibana/
PID_FILE=/var/run/kibana/$NAME.pid
LOCK_FILE=/var/lock/subsys/$NAME
PATH=/bin:/usr/bin:/sbin:/usr/sbin:$KIBANA_BIN
DAEMON=$KIBANA_BIN/$NAME

# Configure User to run daemon process
DAEMON_USER=root

# Configure logging location
KIBANA_LOG=/var/log/kibana.log

# Begin Script
RETVAL=0

if [ `id -u` -ne 0 ]; then
    echo "You need root privileges to run this script"
    exit 1
fi

# Function library
. /etc/init.d/functions

start() {
    echo -n "Starting $DESC : "
    pid=`pidofproc -p $PID_FILE kibana`
    if [ -n "$pid" ]; then
        echo "Already running."
        exit 0
    else
        # Start Daemon
        if [ ! -d "$PID_FOLDER" ]; then
            mkdir $PID_FOLDER
        fi
        daemon --user=$DAEMON_USER --pidfile=$PID_FILE $DAEMON 1>"$KIBANA_LOG" 2>&1 &
        sleep 2
        pidofproc node > $PID_FILE
        RETVAL=$?
        [ $RETVAL -eq 0 ] && success || failure
        echo
        [ $RETVAL = 0 ] && touch $LOCK_FILE
        return $RETVAL
    fi
}

reload() {
    echo "Reload command is not implemented for this service."
    return $RETVAL
}

stop() {
    echo -n "Stopping $DESC : "
    killproc -p $PID_FILE $DAEMON
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && rm -f $PID_FILE $LOCK_FILE
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status -p $PID_FILE $DAEMON
        RETVAL=$?
        ;;
    restart)
        stop
        start
        ;;
    reload)
        reload
        ;;
    *)
        # Invalid Arguments, print the following message.
        echo "Usage: $0 {start|stop|status|restart|reload}" >&2
        exit 2
        ;;
esac
EOF
chmod +x kibana
mv kibana /etc/init.d/
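Register and start it like any other SysV service (a sketch, relying on the chkconfig defaults in the INIT INFO header):
chkconfig --add kibana && chkconfig kibana on
service kibana start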
#Nginx
yum install nginx -y
vim /etc/nginx/conf.d/elk.conf
server {
    server_name elk.sudo.com;
    auth_basic "Restricted Access";
    auth_basic_user_file passwd;
    location / {
        proxy_pass http://192.168.28.131:5601;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
    }
}
#to use htpasswd instead: yum install httpd-tools -y
echo -n 'sudo:' >> /etc/nginx/passwd #add the user
openssl passwd elk.sudo.com >> /etc/nginx/passwd #append the password hash
cat /etc/nginx/passwd #inspect the result
chkconfig nginx on && service nginx start
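To verify the proxy and the basic auth in one shot (assuming elk.sudo.com resolves to this host, e.g. via /etc/hosts):
curl -I http://elk.sudo.com/                      #expect 401 Unauthorized without credentials
curl -u sudo:elk.sudo.com -I http://elk.sudo.com/ #expect 200 OK proxied from Kibana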
#Logstash--Setup
rpm --import https://packages.elasticsearch.org/GPG-KEY-elasticsearch
vi /etc/yum.repos.d/logstash.repo
[logstash-1.5]
name=Logstash repository for 1.5.x packages
baseurl=http://packages.elasticsearch.org/logstash/1.5/centos
gpgcheck=1
gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch
enabled=1
yum install logstash -y
#Create the SSL certificate (generated on the logstash server). There are two ways: bind it to an IP address, or to an FQDN (DNS name). Pick one.
#1. IP address
Set the parameter below in the [ v3_ca ] section; 192.168.28.131 is the logstash server's address.
vi /etc/pki/tls/openssl.cnf
subjectAltName = IP: 192.168.28.131
cd /etc/pki/tls
openssl req -config /etc/pki/tls/openssl.cnf -x509 -days 3650 -batch -nodes -newkey rsa:2048 -keyout private/logstash-forwarder.key -out certs/logstash-forwarder.crt
#Set -days generously so the certificate does not expire on you.
#2. FQDN
#No changes to openssl.cnf are needed.
cd /etc/pki/tls
openssl req -subj '/CN=logstash.sudo.com/' -x509 -days 3650 -batch -nodes -newkey rsa:2048 -keyout private/logstash-forwarder.key -out certs/logstash-forwarder.crt
logstash.sudo.com is my own test domain, so there is no need to add an A record for logstash.sudo.com.
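Either way, the generated certificate can be inspected before distributing it to the forwarders:
openssl x509 -in certs/logstash-forwarder.crt -noout -subject -dates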
#Logstash-Config
#Add the GeoIP data source
#wget http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz
#gzip -d GeoLiteCity.dat.gz && mv GeoLiteCity.dat /etc/logstash/.
Logstash configuration files use a JSON-like syntax and live in /etc/logstash/conf.d. A configuration has three parts: inputs, filters, and outputs.
First, create 01-lumberjack-input.conf to set up the lumberjack input, the protocol Logstash-Forwarder speaks.
vi /etc/logstash/conf.d/01-lumberjack-input.conf
input {
  lumberjack {
    port => 5043
    type => "logs"
    ssl_certificate => "/etc/pki/tls/certs/logstash-forwarder.crt"
    ssl_key => "/etc/pki/tls/private/logstash-forwarder.key"
  }
}
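Once the logstash service is up (started further below), the lumberjack listener can be verified with:
lsof -i :5043    #should show a java process listening on 5043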
Next, create 11-nginx.conf to filter the nginx logs.
vi /etc/logstash/conf.d/11-nginx.conf
filter {
  if [type] == "nginx" {
    grok {
      match => { "message" => "%{IPORHOST:clientip} - %{NOTSPACE:remote_user} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:method} %{NOTSPACE:request}(?: %{URIPROTO:proto}/%{NUMBER:httpversion})?|%{DATA:rawrequest})\" %{NUMBER:status} (?:%{NUMBER:size}|-) %{QS:referrer} %{QS:agent} %{QS:xforwardedfor}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      add_field => [ "received_from", "%{host}" ]
    }
    date {
      match => [ "timestamp" , "dd/MMM/YYYY:HH:mm:ss Z" ]
    }
#    geoip {
#      source => "clientip"
#      add_tag => [ "geoip" ]
#      fields => ["country_name", "country_code2", "region_name", "city_name", "real_region_name", "latitude", "longitude"]
#      remove_field => [ "[geoip][longitude]", "[geoip][latitude]" ]
#    }
  }
}
This filter looks for logs tagged with type "nginx" (set by Logstash-Forwarder) and tries to parse the incoming nginx log lines with grok, making them structured and queryable. The type must match what logstash-forwarder sends.
Also pay attention to the nginx log format; I use the default log_format here.
#Behind a load balancer / reverse proxy, the format can be changed to something like:
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                '$status $upstream_response_time $request_time $body_bytes_sent '
                '"$http_referer" "$http_user_agent" "$http_x_forwarded_for" "$request_body" '
                '$scheme $upstream_addr';
If the log format differs, the grok pattern has to be rewritten to match. The online tool http://grokdebug.herokuapp.com/ is handy for debugging; most "no data in ELK" problems originate here.
#Grok Debug -- http://grokdebug.herokuapp.com/
If grok does not match your logs, do not move on; keep at it until the match succeeds. The pattern reference at http://grokdebug.herokuapp.com/patterns is well worth studying for writing later rules.
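Besides the online debugger, a pattern can be smoke-tested locally by piping a sample log line through a throwaway stdin pipeline. A minimal sketch (the pattern is shortened here to the leading fields; substitute the full rule from 11-nginx.conf):
echo '1.2.3.4 - - [26/Dec/2016:10:00:00 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.19.7" "-"' | \
/opt/logstash/bin/logstash -e 'input { stdin {} }
filter { grok { match => { "message" => "%{IPORHOST:clientip} - %{NOTSPACE:remote_user} \[%{HTTPDATE:timestamp}\]" } } }
output { stdout { codec => rubydebug } }'
A successful parse prints the captured fields; a failed match prints a _grokparsefailure tag instead.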
Finally, create a file to define the outputs.
vi /etc/logstash/conf.d/99-lumberjack-output.conf
output {
  if "_grokparsefailure" in [tags] {
    file { path => "/var/log/logstash/grokparsefailure-%{type}-%{+YYYY.MM.dd}.log" }
  }
  elasticsearch {
    host => "192.168.28.131"
    protocol => "http"
    index => "logstash-%{type}-%{+YYYY.MM.dd}"
    document_type => "%{type}"
    workers => 5
    template_overwrite => true
  }
  #stdout { codec => rubydebug }
}
This stores structured logs in Elasticsearch and writes logs that fail grok parsing to a file. Note that any filter files added later must be named to sort between 01 and 99, because Logstash loads configuration files in order.
While debugging, skip Elasticsearch at first and write to stdout instead, which makes troubleshooting easier. Also read the logs: many errors show up there and are easy to pin down.
Before starting the logstash service, test the configuration:
# /opt/logstash/bin/logstash --configtest -f /etc/logstash/conf.d/*
Configuration OK
You can also test a single file by name; keep fixing until it reports OK, otherwise the logstash service will not come up. Then start the service.
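Starting and enabling it is the usual service call (the logstash RPM ships a SysV init script):
service logstash start && chkconfig logstash on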
#logstash-forwarder
Copy the public certificate logstash-forwarder.crt created during the logstash setup to every logstash-forwarder server (every server whose logs you want to collect).
wget https://download.elastic.co/logstash-forwarder/binaries/logstash-forwarder-0.4.0-1.x86_64.rpm
rpm -ivh logstash-forwarder-0.4.0-1.x86_64.rpm
vi /etc/logstash-forwarder.conf
{
  "network": {
    "servers": [ "192.168.28.131:5043" ],
    "ssl ca": "/etc/pki/tls/certs/logstash-forwarder.crt",
    "timeout": 30
  },
  "files": [
    {
      "paths": [ "/var/log/nginx/*-access.log" ],
      "fields": { "type": "nginx" }
    }
  ]
}
The configuration file is JSON; if the format is invalid, the logstash-forwarder service will not start.
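Because the file must be strict JSON, a quick syntax check before starting can save a failed start (uses the Python bundled with CentOS 6):
python -m json.tool /etc/logstash-forwarder.conf > /dev/null && echo "JSON OK"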
The last step is to start the logstash-forwarder service. First bring up the stack:
echo -e "192.168.28.131 Test1\n192.168.28.130 Test2\n192.168.28.138 Test3" >> /etc/hosts #without these entries elasticsearch fails on startup (it cannot resolve the Test* hostnames)
su - elk
cd /usr/local/elasticsearch-1.7.2
nohup ./bin/elasticsearch &
(This can also be managed by supervisord and started at boot alongside the other services.)
On the ELK server:
service logstash restart
service kibana restart
Browse to http://elk.sudo.com:9200/ to check that Elasticsearch started successfully.
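Or check from the command line (the cluster health API in Elasticsearch 1.x):
curl 'http://192.168.28.131:9200/'                       #node banner; should report version 1.7.2
curl 'http://192.168.28.131:9200/_cluster/health?pretty' #status should be green or yellow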
On the clients:
service nginx start && service logstash-forwarder start
#Using redis as a log queue: create the corresponding configuration files
vi /etc/logstash/conf.d/redis-input.conf
input {
  lumberjack {
    port => 5043
    type => "logs"
    ssl_certificate => "/etc/pki/tls/certs/logstash-forwarder.crt"
    ssl_key => "/etc/pki/tls/private/logstash-forwarder.key"
  }
}
filter {
  if [type] == "nginx" {
    grok {
      match => { "message" => "%{IPORHOST:clientip} - %{NOTSPACE:remote_user} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:method} %{NOTSPACE:request}(?: %{URIPROTO:proto}/%{NUMBER:httpversion})?|%{DATA:rawrequest})\" %{NUMBER:status} (?:%{NUMBER:size}|-) %{QS:referrer} %{QS:agent} %{QS:xforwardedfor}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      add_field => [ "received_from", "%{host}" ]
    }
    date {
      match => [ "timestamp" , "dd/MMM/YYYY:HH:mm:ss Z" ]
    }
    #test
  }
}
output {
  #### push received logs onto the redis message queue ####
  redis {
    host => "127.0.0.1"
    port => 6379
    data_type => "list"
    key => "logstash:redis"
  }
}
vi /etc/logstash/conf.d/redis-output.conf
input {
  # read from redis
  redis {
    data_type => "list"
    key => "logstash:redis"
    host => "192.168.28.131" #redis-server
    port => 6379
    #threads => 5
  }
}
output {
  elasticsearch {
    host => "192.168.28.131"
    protocol => "http"
    index => "logstash-%{type}-%{+YYYY.MM.dd}"
    document_type => "%{type}"
    workers => 36
    template_overwrite => true
  }
  #stdout { codec => rubydebug }
}
# /opt/logstash/bin/logstash --configtest -f /etc/logstash/conf.d/*
Configuration OK
Log in to redis and query; you should see the key with the corresponding log entries written to it.
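For example, assuming redis-cli is available on the redis host:
redis-cli -h 192.168.28.131 llen logstash:redis       #length of the queued list
redis-cli -h 192.168.28.131 lrange logstash:redis 0 0 #peek at the first queued event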

 

Reposted from: https://my.oschina.net/HeAlvin/blog/828042
