当前位置:   article > 正文

项目大集成

项目大集成

一 keepalived 高可用

192.168.11.11   nginx + keepalived
192.168.11.12   nginx + keepalived

两台均编译安装服务器 

1 主服务器修改文件:

2 备服务器修改文本

scp keepalived.conf 192.168.11.12:/etc/keepalived/

3 给主服务器添加虚拟ip 

 ifconfig ens33:0 192.168.11.188 netmask 255.255.255.255

ifconfig ens33:0 192.168.11.188/32

 4 去真机windows添加虚拟ip

192.168.11.188 www.benet.com www.kgc.com

5 添加脚本 

6执行权限 

chmod +x ng.sh 

7 主服务器关闭nginx

systemctl stop nginx

8 去备服务器检测:

二 实验 动静分离

承接高可用

1 先把之前关闭的 nginx 重新启动

去浏览器上检测 

在虚拟机nginx做静

一 编译安装tomcat:192.168.11.13

另一台tomcat也用此方法

  1. [root@mcb-11-13 ~]# systemctl stop firewalld
  2. [root@mcb-11-13 ~]# setenforce 0
  3. [root@mcb-11-13 ~]# hostnamectl set-hostname slave01
  4. [root@mcb-11-13 ~]# su
  5. [root@slave01 ~]# mkdir /data
  6. [root@slave01 ~]# cd /data
  7. [root@slave01 data]# rz -E
  8. rz waiting to receive.
  9. [root@slave01 data]# rz -E
  10. rz waiting to receive.
  11. [root@slave01 data]# ls
  12. apache-tomcat-9.0.16.tar.gz jdk-8u291-linux-x64.tar.gz
  13. [root@slave01 data]# tar xf jdk-8u291-linux-x64.tar.gz -C /usr/local
  14. [root@slave01 data]# cd /usr/local
  15. [root@slave01 local]# ll
  16. 总用量 0
  17. drwxr-xr-x. 2 root root 6 115 2016 bin
  18. drwxr-xr-x. 2 root root 6 115 2016 etc
  19. drwxr-xr-x. 2 root root 6 115 2016 games
  20. drwxr-xr-x. 2 root root 6 115 2016 include
  21. drwxr-xr-x. 8 10143 10143 273 48 2021 jdk1.8.0_291
  22. drwxr-xr-x. 2 root root 6 115 2016 lib
  23. drwxr-xr-x. 2 root root 6 115 2016 lib64
  24. drwxr-xr-x. 2 root root 6 115 2016 libexec
  25. drwxr-xr-x. 2 root root 6 115 2016 sbin
  26. drwxr-xr-x. 5 root root 49 315 19:36 share
  27. drwxr-xr-x. 2 root root 6 115 2016 src
  28. [root@slave01 local]# ln -s jdk1.8.0_291/ jdk
  29. [root@slave01 local]# ls
  30. bin etc games include jdk jdk1.8.0_291 lib lib64 libexec sbin share src
  31. [root@slave01 local]# . /etc/profile.d/env.sh
  32. [root@slave01 local]# java -version
  33. openjdk version "1.8.0_131" #文本配置文件错误,此时要修改
  34. OpenJDK Runtime Environment (build 1.8.0_131-b12)
  35. OpenJDK 64-Bit Server VM (build 25.131-b12, mixed mode)
  36. [root@slave01 local]# vim /etc/profile.d/env.sh
  37. [root@slave01 local]# . /etc/profile.d/env.sh
  38. [root@slave01 local]# java -version
  39. java version "1.8.0_291" #jdk 这个版本才是正确的
  40. Java(TM) SE Runtime Environment (build 1.8.0_291-b10)
  41. Java HotSpot(TM) 64-Bit Server VM (build 25.291-b10, mixed mode)
  42. [root@slave01 local]# ls
  43. bin etc games include jdk jdk1.8.0_291 lib lib64 libexec sbin share src
  44. [root@slave01 local]# cd /data
  45. [root@slave01 data]# ls
  46. apache-tomcat-9.0.16.tar.gz jdk-8u291-linux-x64.tar.gz
  47. [root@slave01 data]# tar xf apache-tomcat-9.0.16.tar.gz
  48. [root@slave01 data]# ls
  49. apache-tomcat-9.0.16 apache-tomcat-9.0.16.tar.gz jdk-8u291-linux-x64.tar.gz
  50. [root@slave01 data]# cp -r apache-tomcat-9.0.16 /usr/local
  51. [root@slave01 data]# cd /usr/local
  52. [root@slave01 local]# ls
  53. apache-tomcat-9.0.16 etc include jdk1.8.0_291 lib64 sbin src
  54. bin games jdk lib libexec share
  55. [root@slave01 local]# ln -s apache-tomcat-9.0.16/ tomcat
  56. [root@slave01 local]# ll
  57. 总用量 0
  58. drwxr-xr-x. 9 root root 220 419 18:45 apache-tomcat-9.0.16
  59. drwxr-xr-x. 2 root root 6 115 2016 bin
  60. drwxr-xr-x. 2 root root 6 115 2016 etc
  61. drwxr-xr-x. 2 root root 6 115 2016 games
  62. drwxr-xr-x. 2 root root 6 115 2016 include
  63. lrwxrwxrwx. 1 root root 13 419 18:40 jdk -> jdk1.8.0_291/
  64. drwxr-xr-x. 8 10143 10143 273 48 2021 jdk1.8.0_291
  65. drwxr-xr-x. 2 root root 6 115 2016 lib
  66. drwxr-xr-x. 2 root root 6 115 2016 lib64
  67. drwxr-xr-x. 2 root root 6 115 2016 libexec
  68. drwxr-xr-x. 2 root root 6 115 2016 sbin
  69. drwxr-xr-x. 5 root root 49 315 19:36 share
  70. drwxr-xr-x. 2 root root 6 115 2016 src
  71. lrwxrwxrwx. 1 root root 21 419 18:46 tomcat -> apache-tomcat-9.0.16/
  72. [root@slave01 local]# useradd tomcat -s /sbin/nologin
  73. [root@slave01 local]# useradd tomcat -s /sbin/nologin -R #建错了,需要修改
  74. useradd: 选项“-R”需要一个选项
  75. [root@slave01 local]# useradd tomcat -s /sbin/nologin -M
  76. useradd:用户“tomcat”已存在
  77. [root@slave01 local]# userdel tomcat
  78. [root@slave01 local]# find / -name tomcat
  79. /etc/selinux/targeted/active/modules/100/tomcat
  80. /var/spool/mail/tomcat
  81. /usr/local/tomcat
  82. /home/tomcat
  83. [root@slave01 local]# cat /etc/passwd
  84. root:x:0:0:root:/root:/bin/bash
  85. bin:x:1:1:bin:/bin:/sbin/nologin
  86. daemon:x:2:2:daemon:/sbin:/sbin/nologin
  87. adm:x:3:4:adm:/var/adm:/sbin/nologin
  88. lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin
  89. sync:x:5:0:sync:/sbin:/bin/sync
  90. shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown
  91. halt:x:7:0:halt:/sbin:/sbin/halt
  92. mail:x:8:12:mail:/var/spool/mail:/sbin/nologin
  93. operator:x:11:0:operator:/root:/sbin/nologin
  94. games:x:12:100:games:/usr/games:/sbin/nologin
  95. ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin
  96. nobody:x:99:99:Nobody:/:/sbin/nologin
  97. systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin
  98. dbus:x:81:81:System message bus:/:/sbin/nologin
  99. polkitd:x:999:998:User for polkitd:/:/sbin/nologin
  100. abrt:x:173:173::/etc/abrt:/sbin/nologin
  101. libstoragemgmt:x:998:996:daemon account for libstoragemgmt:/var/run/lsm:/sbin/nologin
  102. rpc:x:32:32:Rpcbind Daemon:/var/lib/rpcbind:/sbin/nologin
  103. colord:x:997:995:User for colord:/var/lib/colord:/sbin/nologin
  104. saslauth:x:996:76:Saslauthd user:/run/saslauthd:/sbin/nologin
  105. rtkit:x:172:172:RealtimeKit:/proc:/sbin/nologin
  106. pulse:x:171:171:PulseAudio System Daemon:/var/run/pulse:/sbin/nologin
  107. chrony:x:995:991::/var/lib/chrony:/sbin/nologin
  108. rpcuser:x:29:29:RPC Service User:/var/lib/nfs:/sbin/nologin
  109. nfsnobody:x:65534:65534:Anonymous NFS User:/var/lib/nfs:/sbin/nologin
  110. ntp:x:38:38::/etc/ntp:/sbin/nologin
  111. tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin
  112. usbmuxd:x:113:113:usbmuxd user:/:/sbin/nologin
  113. geoclue:x:994:989:User for geoclue:/var/lib/geoclue:/sbin/nologin
  114. qemu:x:107:107:qemu user:/:/sbin/nologin
  115. radvd:x:75:75:radvd user:/:/sbin/nologin
  116. setroubleshoot:x:993:988::/var/lib/setroubleshoot:/sbin/nologin
  117. sssd:x:992:987:User for sssd:/:/sbin/nologin
  118. gdm:x:42:42::/var/lib/gdm:/sbin/nologin
  119. gnome-initial-setup:x:991:986::/run/gnome-initial-setup/:/sbin/nologin
  120. sshd:x:74:74:Privilege-separated SSH:/var/empty/sshd:/sbin/nologin
  121. avahi:x:70:70:Avahi mDNS/DNS-SD Stack:/var/run/avahi-daemon:/sbin/nologin
  122. postfix:x:89:89::/var/spool/postfix:/sbin/nologin
  123. tcpdump:x:72:72::/:/sbin/nologin
  124. mcb:x:1000:1000:mcb:/home/mcb:/bin/bash
  125. tomcat:x:1001:1001::/home/tomcat:/sbin/nologin
  126. [root@slave01 local]# userdel tomcat
  127. [root@slave01 local]#
  128. [root@slave01 local]# cat /etc/passwd
  129. root:x:0:0:root:/root:/bin/bash
  130. bin:x:1:1:bin:/bin:/sbin/nologin
  131. daemon:x:2:2:daemon:/sbin:/sbin/nologin
  132. adm:x:3:4:adm:/var/adm:/sbin/nologin
  133. lp:x:4:7:lp:/var/spool/lpd:/sbin/nologin
  134. sync:x:5:0:sync:/sbin:/bin/sync
  135. shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown
  136. halt:x:7:0:halt:/sbin:/sbin/halt
  137. mail:x:8:12:mail:/var/spool/mail:/sbin/nologin
  138. operator:x:11:0:operator:/root:/sbin/nologin
  139. games:x:12:100:games:/usr/games:/sbin/nologin
  140. ftp:x:14:50:FTP User:/var/ftp:/sbin/nologin
  141. nobody:x:99:99:Nobody:/:/sbin/nologin
  142. systemd-network:x:192:192:systemd Network Management:/:/sbin/nologin
  143. dbus:x:81:81:System message bus:/:/sbin/nologin
  144. polkitd:x:999:998:User for polkitd:/:/sbin/nologin
  145. abrt:x:173:173::/etc/abrt:/sbin/nologin
  146. libstoragemgmt:x:998:996:daemon account for libstoragemgmt:/var/run/lsm:/sbin/nologin
  147. rpc:x:32:32:Rpcbind Daemon:/var/lib/rpcbind:/sbin/nologin
  148. colord:x:997:995:User for colord:/var/lib/colord:/sbin/nologin
  149. saslauth:x:996:76:Saslauthd user:/run/saslauthd:/sbin/nologin
  150. rtkit:x:172:172:RealtimeKit:/proc:/sbin/nologin
  151. pulse:x:171:171:PulseAudio System Daemon:/var/run/pulse:/sbin/nologin
  152. chrony:x:995:991::/var/lib/chrony:/sbin/nologin
  153. rpcuser:x:29:29:RPC Service User:/var/lib/nfs:/sbin/nologin
  154. nfsnobody:x:65534:65534:Anonymous NFS User:/var/lib/nfs:/sbin/nologin
  155. ntp:x:38:38::/etc/ntp:/sbin/nologin
  156. tss:x:59:59:Account used by the trousers package to sandbox the tcsd daemon:/dev/null:/sbin/nologin
  157. usbmuxd:x:113:113:usbmuxd user:/:/sbin/nologin
  158. geoclue:x:994:989:User for geoclue:/var/lib/geoclue:/sbin/nologin
  159. qemu:x:107:107:qemu user:/:/sbin/nologin
  160. radvd:x:75:75:radvd user:/:/sbin/nologin
  161. setroubleshoot:x:993:988::/var/lib/setroubleshoot:/sbin/nologin
  162. sssd:x:992:987:User for sssd:/:/sbin/nologin
  163. gdm:x:42:42::/var/lib/gdm:/sbin/nologin
  164. gnome-initial-setup:x:991:986::/run/gnome-initial-setup/:/sbin/nologin
  165. sshd:x:74:74:Privilege-separated SSH:/var/empty/sshd:/sbin/nologin
  166. avahi:x:70:70:Avahi mDNS/DNS-SD Stack:/var/run/avahi-daemon:/sbin/nologin
  167. postfix:x:89:89::/var/spool/postfix:/sbin/nologin
  168. tcpdump:x:72:72::/:/sbin/nologin
  169. mcb:x:1000:1000:mcb:/home/mcb:/bin/bash
  170. [root@slave01 local]#
  171. [root@slave01 local]# find / -name tomcat
  172. /etc/selinux/targeted/active/modules/100/tomcat
  173. /usr/local/tomcat
  174. [root@slave01 local]# useradd tomcat -s /sbin/nologin -M
  175. [root@slave01 local]# chown tomcat:tomcat /usr/local/tomcat/ -R
  176. [root@slave01 local]# systemctl start tomcat
  177. Failed to start tomcat.service: Unit not found.
  178. [root@slave01 local]# vim /usr/lib/systemd/system/tomcat
  179. [root@slave01 local]# systemctl daemon-reload
  180. [root@slave01 local]# systemctl start tomcat #因为tomcat.service没有加service,就不行
  181. Failed to start tomcat.service: Unit not found.
  182. [root@slave01 local]# cd /usr/lib/systemd/system/
  183. [root@slave01 system]# ls
  184. abrt-ccpp.service plymouth-poweroff.service
  185. abrtd.service plymouth-quit.service
  186. abrt-oops.service plymouth-quit-wait.service
  187. abrt-pstoreoops.service plymouth-read-write.service
  188. abrt-vmcore.service plymouth-reboot.service
  189. abrt-xorg.service plymouth-start.service
  190. accounts-daemon.service plymouth-switch-root.service
  191. alsa-restore.service polkit.service
  192. alsa-state.service postfix.service
  193. alsa-store.service poweroff.target
  194. anaconda-direct.service poweroff.target.wants
  195. anaconda-nm-config.service printer.target
  196. anaconda-noshell.service proc-fs-nfsd.mount
  197. anaconda-pre.service proc-sys-fs-binfmt_misc.automount
  198. anaconda.service proc-sys-fs-binfmt_misc.mount
  199. anaconda-shell@.service psacct.service
  200. anaconda-sshd.service qemu-guest-agent.service
  201. anaconda.target quotaon.service
  202. anaconda-tmux@.service radvd.service
  203. arp-ethers.service rc-local.service
  204. atd.service rdisc.service
  205. auditd.service rdma-ndd.service
  206. auth-rpcgss-module.service rdma.service
  207. autofs.service realmd.service
  208. autovt@.service reboot.target
  209. avahi-daemon.service reboot.target.wants
  210. avahi-daemon.socket remote-fs-pre.target
  211. basic.target remote-fs.target
  212. basic.target.wants rescue.service
  213. blk-availability.service rescue.target
  214. bluetooth.service rescue.target.wants
  215. bluetooth.target rhel-autorelabel-mark.service
  216. brandbot.path rhel-autorelabel.service
  217. brandbot.service rhel-configure.service
  218. brltty.service rhel-dmesg.service
  219. canberra-system-bootup.service rhel-domainname.service
  220. canberra-system-shutdown-reboot.service rhel-import-state.service
  221. canberra-system-shutdown.service rhel-loadmodules.service
  222. certmonger.service rhel-readonly.service
  223. cgconfig.service rngd.service
  224. cgdcbxd.service rpcbind.service
  225. cgred.service rpcbind.socket
  226. chrony-dnssrv@.service rpcbind.target
  227. chrony-dnssrv@.timer rpc-gssd.service
  228. chronyd.service rpcgssd.service
  229. chrony-wait.service rpcidmapd.service
  230. colord.service rpc-rquotad.service
  231. configure-printer@.service rpc-statd-notify.service
  232. console-getty.service rpc-statd.service
  233. console-shell.service rsyncd.service
  234. container-getty@.service rsyncd@.service
  235. cpupower.service rsyncd.socket
  236. crond.service rsyslog.service
  237. cryptsetup-pre.target rtkit-daemon.service
  238. cryptsetup.target runlevel0.target
  239. ctrl-alt-del.target runlevel1.target
  240. cups-browsed.service runlevel1.target.wants
  241. cups.path runlevel2.target
  242. cups.service runlevel2.target.wants
  243. cups.socket runlevel3.target
  244. dbus-org.freedesktop.hostname1.service runlevel3.target.wants
  245. dbus-org.freedesktop.import1.service runlevel4.target
  246. dbus-org.freedesktop.locale1.service runlevel4.target.wants
  247. dbus-org.freedesktop.login1.service runlevel5.target
  248. dbus-org.freedesktop.machine1.service runlevel5.target.wants
  249. dbus-org.freedesktop.timedate1.service runlevel6.target
  250. dbus.service saslauthd.service
  251. dbus.socket selinux-policy-migrate-local-changes@.service
  252. dbus.target.wants serial-getty@.service
  253. debug-shell.service shutdown.target
  254. default.target shutdown.target.wants
  255. default.target.wants sigpwr.target
  256. dev-hugepages.mount sleep.target
  257. dev-mqueue.mount -.slice
  258. dm-event.service slices.target
  259. dm-event.socket smartcard.target
  260. dmraid-activation.service smartd.service
  261. dnsmasq.service sockets.target
  262. dracut-cmdline.service sockets.target.wants
  263. dracut-initqueue.service sound.target
  264. dracut-mount.service speech-dispatcherd.service
  265. dracut-pre-mount.service spice-vdagentd.service
  266. dracut-pre-pivot.service spice-vdagentd.target
  267. dracut-pre-trigger.service sshd-keygen.service
  268. dracut-pre-udev.service sshd.service
  269. dracut-shutdown.service sshd@.service
  270. ebtables.service sshd.socket
  271. emergency.service sssd-autofs.service
  272. emergency.target sssd-autofs.socket
  273. fcoe.service sssd-nss.service
  274. final.target sssd-nss.socket
  275. firewalld.service sssd-pac.service
  276. firstboot-graphical.service sssd-pac.socket
  277. flatpak-system-helper.service sssd-pam-priv.socket
  278. fprintd.service sssd-pam.service
  279. fstrim.service sssd-pam.socket
  280. fstrim.timer sssd-secrets.service
  281. gdm.service sssd-secrets.socket
  282. geoclue.service sssd.service
  283. getty@.service sssd-ssh.service
  284. getty.target sssd-ssh.socket
  285. graphical.target sssd-sudo.service
  286. graphical.target.wants sssd-sudo.socket
  287. gssproxy.service suspend.target
  288. halt-local.service swap.target
  289. halt.target sys-fs-fuse-connections.mount
  290. halt.target.wants sysinit.target
  291. hibernate.target sysinit.target.wants
  292. hybrid-sleep.target sys-kernel-config.mount
  293. hypervfcopyd.service sys-kernel-debug.mount
  294. hypervkvpd.service syslog.socket
  295. hypervvssd.service syslog.target.wants
  296. initial-setup-graphical.service sysstat.service
  297. initial-setup-reconfiguration.service systemd-ask-password-console.path
  298. initial-setup.service systemd-ask-password-console.service
  299. initial-setup-text.service systemd-ask-password-plymouth.path
  300. initrd-cleanup.service systemd-ask-password-plymouth.service
  301. initrd-fs.target systemd-ask-password-wall.path
  302. initrd-parse-etc.service systemd-ask-password-wall.service
  303. initrd-root-fs.target systemd-backlight@.service
  304. initrd-switch-root.service systemd-binfmt.service
  305. initrd-switch-root.target systemd-bootchart.service
  306. initrd-switch-root.target.wants systemd-firstboot.service
  307. initrd.target systemd-fsck-root.service
  308. initrd.target.wants systemd-fsck@.service
  309. initrd-udevadm-cleanup-db.service systemd-halt.service
  310. instperf.service systemd-hibernate-resume@.service
  311. iprdump.service systemd-hibernate.service
  312. iprinit.service systemd-hostnamed.service
  313. iprupdate.service systemd-hwdb-update.service
  314. iprutils.target systemd-hybrid-sleep.service
  315. ipsec.service systemd-importd.service
  316. irqbalance.service systemd-initctl.service
  317. iscsid.service systemd-initctl.socket
  318. iscsid.socket systemd-journal-catalog-update.service
  319. iscsi.service systemd-journald.service
  320. iscsi-shutdown.service systemd-journald.socket
  321. iscsiuio.service systemd-journal-flush.service
  322. iscsiuio.socket systemd-kexec.service
  323. kdump.service systemd-localed.service
  324. kexec.target systemd-logind.service
  325. kexec.target.wants systemd-machined.service
  326. kmod-static-nodes.service systemd-machine-id-commit.service
  327. kpatch.service systemd-modules-load.service
  328. ksm.service systemd-nspawn@.service
  329. ksmtuned.service systemd-poweroff.service
  330. libstoragemgmt.service systemd-quotacheck.service
  331. libvirtd.service systemd-random-seed.service
  332. lldpad.service systemd-readahead-collect.service
  333. lldpad.socket systemd-readahead-done.service
  334. local-fs-pre.target systemd-readahead-done.timer
  335. local-fs.target systemd-readahead-drop.service
  336. local-fs.target.wants systemd-readahead-replay.service
  337. lvm2-lvmetad.service systemd-reboot.service
  338. lvm2-lvmetad.socket systemd-remount-fs.service
  339. lvm2-lvmpolld.service systemd-rfkill@.service
  340. lvm2-lvmpolld.socket systemd-shutdownd.service
  341. lvm2-monitor.service systemd-shutdownd.socket
  342. lvm2-pvscan@.service systemd-suspend.service
  343. machine.slice systemd-sysctl.service
  344. machines.target systemd-timedated.service
  345. mdadm-grow-continue@.service systemd-tmpfiles-clean.service
  346. mdadm-last-resort@.service systemd-tmpfiles-clean.timer
  347. mdadm-last-resort@.timer systemd-tmpfiles-setup-dev.service
  348. mdmonitor.service systemd-tmpfiles-setup.service
  349. mdmon@.service systemd-udevd-control.socket
  350. messagebus.service systemd-udevd-kernel.socket
  351. microcode.service systemd-udevd.service
  352. ModemManager.service systemd-udev-settle.service
  353. multipathd.service systemd-udev-trigger.service
  354. multi-user.target systemd-update-done.service
  355. multi-user.target.wants systemd-update-utmp-runlevel.service
  356. netcf-transaction.service systemd-update-utmp.service
  357. NetworkManager-dispatcher.service systemd-user-sessions.service
  358. NetworkManager.service systemd-vconsole-setup.service
  359. NetworkManager-wait-online.service system.slice
  360. network-online.target system-update.target
  361. network-online.target.wants system-update.target.wants
  362. network-pre.target target.service
  363. network.target tcsd.service
  364. nfs-blkmap.service teamd@.service
  365. nfs-client.target timers.target
  366. nfs-config.service timers.target.wants
  367. nfs-idmapd.service time-sync.target
  368. nfs-idmap.service tmp.mount
  369. nfs-lock.service tomcat
  370. nfslock.service tuned.service
  371. nfs-mountd.service udisks2.service
  372. nfs-rquotad.service umount.target
  373. nfs-secure.service upower.service
  374. nfs-server.service usb_modeswitch@.service
  375. nfs.service usbmuxd.service
  376. nfs-utils.service user.slice
  377. nss-lookup.target var-lib-nfs-rpc_pipefs.mount
  378. nss-user-lookup.target vgauthd.service
  379. ntpdate.service virt-guest-shutdown.target
  380. ntpd.service virtlockd.service
  381. numad.service virtlockd.socket
  382. oddjobd.service virtlogd.service
  383. packagekit-offline-update.service virtlogd.socket
  384. packagekit.service vmtoolsd.service
  385. paths.target wacom-inputattach@.service
  386. plymouth-halt.service wpa_supplicant.service
  387. plymouth-kexec.service zram.service
  388. [root@slave01 system]# mv tomcat tomcat.service
  389. [root@slave01 system]# systemctl daemon-reload
  390. [root@slave01 system]# systemctl start tomcat.service
  391. [root@slave01 system]# systemctl status tomcat.service
  392. ● tomcat.service - Tomcat
  393. Loaded: loaded (/usr/lib/systemd/system/tomcat.service; disabled; vendor preset: disabled)
  394. Active: active (running) since 五 2024-04-19 19:04:17 CST; 13s ago
  395. Process: 4903 ExecStart=/usr/local/tomcat/bin/startup.sh (code=exited, status=0/SUCCESS)
  396. Main PID: 4918 (catalina.sh)
  397. CGroup: /system.slice/tomcat.service
  398. ├─4918 /bin/sh /usr/local/tomcat/bin/catalina.sh start
  399. └─4919 /usr/bin/java -Djava.util.logging.config.file=/usr/local/tomcat/conf/loggi...
  400. 419 19:04:17 slave01 systemd[1]: Starting Tomcat...
  401. 419 19:04:17 slave01 startup.sh[4903]: Using CATALINA_BASE: /usr/local/tomcat
  402. 419 19:04:17 slave01 startup.sh[4903]: Using CATALINA_HOME: /usr/local/tomcat
  403. 419 19:04:17 slave01 startup.sh[4903]: Using CATALINA_TMPDIR: /usr/local/tomcat/temp
  404. 419 19:04:17 slave01 startup.sh[4903]: Using JRE_HOME: /usr
  405. 419 19:04:17 slave01 startup.sh[4903]: Using CLASSPATH: /usr/local/tomcat/bin/b...jar
  406. 419 19:04:17 slave01 systemd[1]: Started Tomcat.
  407. Hint: Some lines were ellipsized, use -l to show in full.
  408. [root@slave01 system]# systemctl enable tomcat.service
  409. Created symlink from /etc/systemd/system/multi-user.target.wants/tomcat.service to /usr/lib/systemd/system/tomcat.service.
  410. [root@slave01 system]#

实验: 做动静分离  

nginx

192.168.11.11

tomcat   192.168.11.13
tomcat   192.168.11.14

1 三台机器分别安装 nginx(参考我 虎 赵)    tomcat(我 虎 赵)

1 编辑反向代理、动静分离配置文件

 

 

去静态 也就是nginx添加文本 

 

 

2 去tomcat配置(192.168.11.13)动态文本

 

3 检测

二 MySQL MHA

mysql编译安装:

master    192.168.11.7
slave01   192.168.11.13
slave02   192.168.11.14
manager   192.168.11.9

1 四台主机关闭防火墙 防护 

2 下载安装环境  安装包

3 创建mysql用户及解压mysql压缩包  软连接

4 编译安装及安装路径

5 更改属主/属组  修改配置文件

6 设置环境变量  加载

7 启动与自启动

  1. [root@slave01 ~]#
  2. [root@slave01 ~]# cd /opt
  3. [root@slave01 opt]# rz -E
  4. rz waiting to receive.
  5. [root@slave01 opt]# rz -E
  6. rz waiting to receive.
  7. [root@slave01 opt]# ls
  8. boost_1_59_0.tar.gz mysql-5.7.17.tar.gz rh
  9. [root@slave01 opt]#
  10. [root@slave01 opt]# yum -y install gcc gcc-c++ ncurses ncurses-devel bison cmake
  11. 已加载插件:fastestmirror, langpacks
  12. yum -y install gcc gcc-c++ cmake bison bison-devel zlib-devel libcurl-devel libarchive-devel boost-devel ncurses-devel gnutls-devel libxml2-devel openssl-devel libevent-devel libaio-devel
  13. 创建mysql用户及解压mysql压缩包
  14. [root@slave01 opt]# useradd -s /sbin/nologin mysql
  15. [root@slave01 opt]# ls
  16. boost_1_59_0.tar.gz mysql-5.7.17.tar.gz rh
  17. [root@slave01 opt]# tar xf mysql-5.7.17.tar.gz
  18. [root@slave01 opt]# tar xf boost_1_59_0.tar.gz -C /usr/local
  19. [root@slave01 opt]# cd /usr/local
  20. [root@slave01 local]# ls
  21. apache-tomcat-9.0.16 boost_1_59_0 games jdk lib libexec share tomcat
  22. bin etc include jdk1.8.0_291 lib64 sbin src
  23. [root@slave01 local]# ln -s boost_1_59_0/ boost
  24. [root@slave01 local]# ls
  25. apache-tomcat-9.0.16 boost etc include jdk1.8.0_291 lib64 sbin src
  26. bin boost_1_59_0 games jdk lib libexec share tomcat
  27. [root@slave01 local]#
  28. 编译安装及安装路径
  29. [root@slave01 local]# cd /opt/mysql-5.7.17/
  30. [root@slave01 mysql-5.7.17]# cmake \
  31. > -DCMAKE_INSTALL_PREFIX=/usr/local/mysql \
  32. > -DMYSQL_UNIX_ADDR=/usr/local/mysql/mysql.sock \
  33. > -DSYSCONFDIR=/etc \
  34. > -DSYSTEMD_PID_DIR=/usr/local/mysql \
  35. > -DDEFAULT_CHARSET=utf8 \
  36. > -DDEFAULT_COLLATION=utf8_general_ci \
  37. > -DWITH_EXTRA_CHARSETS=all \
  38. > -DWITH_INNOBASE_STORAGE_ENGINE=1 \
  39. > -DWITH_ARCHIVE_STORAGE_ENGINE=1 \
  40. > -DWITH_BLACKHOLE_STORAGE_ENGINE=1 \
  41. > -DWITH_PERFSCHEMA_STORAGE_ENGINE=1 \
  42. > -DMYSQL_DATADIR=/usr/local/mysql/data \
  43. > -DWITH_BOOST=/usr/local/boost \
  44. > -DWITH_SYSTEMD=1
  45. [root@slave01 mysql-5.7.17]# make -j 4 && make install #需要很长时间
  46. chown -R mysql:mysql /usr/local/mysql/
  47. # 更改属主/属组
  48. chown -R mysql:mysql /usr/local/mysql/
  49. 修改配置文件
  50. [root@slave01 mysql-5.7.17]# vim /etc/my.cnf
  51. [client]
  52. port = 3306
  53. default-character-set=utf8
  54. socket=/usr/local/mysql/mysql.sock
  55. [mysql]
  56. port = 3306
  57. default-character-set=utf8
  58. socket=/usr/local/mysql/mysql.sock
  59. auto-rehash
  60. [mysqld]
  61. user = mysql
  62. basedir=/usr/local/mysql
  63. datadir=/usr/local/mysql/data
  64. port = 3306
  65. character-set-server=utf8
  66. pid-file = /usr/local/mysql/mysqld.pid
  67. socket=/usr/local/mysql/mysql.sock
  68. bind-address = 0.0.0.0
  69. skip-name-resolve
  70. max_connections=2048
  71. default-storage-engine=INNODB
  72. max_allowed_packet=16M
  73. server-id = 1
  74. sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_AUTO_VALUE_ON_ZERO,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,PIPES_AS_CONCAT,ANSI_QUOTES
  75. 设置环境变量
  76. [root@slave01 mysql-5.7.17]# echo "PATH=$PATH:/usr/local/mysql/bin" >> /etc/profile
  77. [root@slave01 mysql-5.7.17]# source /etc/profile
  78. [root@slave01 mysql-5.7.17]#
  79. [root@slave01 mysql-5.7.17]# cd /usr/local/mysql/bin/
  80. [root@slave01 bin]#
  81. [root@slave01 bin]# ./mysqld \
  82. > --initialize-insecure \
  83. > --user=mysql \
  84. > --basedir=/usr/local/mysql \
  85. > --datadir=/usr/local/mysql/data
  86. 2024-04-19T15:27:32.812209Z 0 [Warning] TIMESTAMP with implicit DEFAULT value is deprecated. Please use --explicit_defaults_for_timestamp server option (see documentation for more details).
  87. 2024-04-19T15:27:33.117578Z 0 [Warning] InnoDB: New log files created, LSN=45790
  88. 2024-04-19T15:27:33.146953Z 0 [Warning] InnoDB: Creating foreign key constraint system tables.
  89. 2024-04-19T15:27:33.221546Z 0 [Warning] No existing UUID has been found, so we assume that this is the first time that this server has been started. Generating a new UUID: 57baeda1-fe61-11ee-bf36-000c291fe803.
  90. 2024-04-19T15:27:33.223747Z 0 [Warning] Gtid table is not ready to be used. Table 'mysql.gtid_executed' cannot be opened.
  91. 2024-04-19T15:27:33.225148Z 1 [Warning] root@localhost is created with an empty password ! Please consider switching off the --initialize-insecure option.
  92. 2024-04-19T15:27:33.416709Z 1 [Warning] 'user' entry 'root@localhost' ignored in --skip-name-resolve mode.
  93. 2024-04-19T15:27:33.416742Z 1 [Warning] 'user' entry 'mysql.sys@localhost' ignored in --skip-name-resolve mode.
  94. 2024-04-19T15:27:33.416756Z 1 [Warning] 'db' entry 'sys mysql.sys@localhost' ignored in --skip-name-resolve mode.
  95. 2024-04-19T15:27:33.416766Z 1 [Warning] 'proxies_priv' entry '@ root@localhost' ignored in --skip-name-resolve mode.
  96. 2024-04-19T15:27:33.416792Z 1 [Warning] 'tables_priv' entry 'sys_config mysql.sys@localhost' ignored in --skip-name-resolve mode.
  97. [root@slave01 bin]#
  98. [root@slave01 bin]# cp /usr/local/mysql/usr/lib/systemd/system/mysqld.service /usr/lib/systemd/system/
  99. [root@slave01 bin]#
  100. [root@slave01 bin]# systemctl daemon-reload
  101. [root@slave01 bin]# systemctl start mysqld.service
  102. [root@slave01 bin]# systemctl status mysqld.service
  103. ● mysqld.service - MySQL Server
  104. Loaded: loaded (/usr/lib/systemd/system/mysqld.service; disabled; vendor preset: disabled)
  105. Active: active (running) since 六 2024-04-20 00:09:17 CST; 8s ago
  106. Docs: man:mysqld(8)
  107. http://dev.mysql.com/doc/refman/en/using-systemd.html
  108. Process: 22677 ExecStart=/usr/local/mysql/bin/mysqld --daemonize --pid-file=/usr/local/mysql/mysqld.pid $MYSQLD_OPTS (code=exited, status=0/SUCCESS)
  109. Process: 22657 ExecStartPre=/usr/local/mysql/bin/mysqld_pre_systemd (code=exited, status=0/SUCCESS)
  110. Main PID: 22680 (mysqld)
  111. CGroup: /system.slice/mysqld.service
  112. └─22680 /usr/local/mysql/bin/mysqld --daemonize --pid-file=/usr/local/mysql/mysql...
  113. 420 00:09:17 slave02 mysqld[22677]: 2024-04-19T16:09:17.907483Z 0 [Warning] 'db' entr...de.
  114. 420 00:09:17 slave02 mysqld[22677]: 2024-04-19T16:09:17.907493Z 0 [Warning] 'proxies_...de.
  115. 4月 20 00:09:17 slave02 mysqld[22677]: 2024-04-19T16:09:17.908342Z 0 [Warning] 'tables_p...de.
  116. 420 00:09:17 slave02 mysqld[22677]: 2024-04-19T16:09:17.912412Z 0 [Note] Event Schedu...nts
  117. 420 00:09:17 slave02 mysqld[22677]: 2024-04-19T16:09:17.912647Z 0 [Note] Executing 'S...ck.
  118. 4月 20 00:09:17 slave02 mysqld[22677]: 2024-04-19T16:09:17.912656Z 0 [Note] Beginning of...les
  119. 4月 20 00:09:17 slave02 mysqld[22677]: 2024-04-19T16:09:17.930205Z 0 [Note] End of list ...les
  120. 4月 20 00:09:17 slave02 mysqld[22677]: 2024-04-19T16:09:17.930343Z 0 [Note] /usr/local/m...ns.
  121. 4月 20 00:09:17 slave02 mysqld[22677]: Version: '5.7.17' socket: '/usr/local/mysql/mysq...ion
  122. 420 00:09:17 slave02 systemd[1]: Started MySQL Server.
  123. Hint: Some lines were ellipsized, use -l to show in full.
  124. [root@slave02 bin]# systemctl enable mysqld.service
  125. Created symlink from /etc/systemd/system/multi-user.target.wants/mysqld.service to /usr/lib/systemd/system/mysqld.service.
  126. [root@slave02 bin]# netstat -natp |grep mysql
  127. tcp 0 0 0.0.0.0:3306 0.0.0.0:* LISTEN 22680/mysqld
  128. [root@slave02 bin]# mysqladmin -uroot -p password "123"
  129. Enter password:
  130. mysqladmin: [Warning] Using a password on the command line interface can be insecure.
  131. Warning: Since password will be sent to server in plain text, use ssl connection to ensure password safety.
  132. [root@slave02 bin]# mysql -uroot -p123
  133. mysql: [Warning] Using a password on the command line interface can be insecure.
  134. Welcome to the MySQL monitor. Commands end with ; or \g.
  135. Your MySQL connection id is 4
  136. Server version: 5.7.17 Source distribution
  137. Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
  138. Oracle is a registered trademark of Oracle Corporation and/or its
  139. affiliates. Other names may be trademarks of their respective
  140. owners.
  141. Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
  142. mysql>
  143. mysql> show databases;
  144. +--------------------+
  145. | Database |
  146. +--------------------+
  147. | information_schema |
  148. | mysql |
  149. | performance_schema |
  150. | sys |
  151. +--------------------+
  152. 4 rows in set (0.01 sec)
  153. mysql> exit
  154. Bye
  155. [root@slave02 bin]#
  1. [client]
  2. port = 3306
  3. default-character-set=utf8
  4. socket=/usr/local/mysql/mysql.sock
  5. [mysql]
  6. port = 3306
  7. default-character-set=utf8
  8. socket=/usr/local/mysql/mysql.sock
  9. auto-rehash
  10. [mysqld]
  11. user = mysql
  12. basedir=/usr/local/mysql
  13. datadir=/usr/local/mysql/data
  14. port = 3306
  15. character-set-server=utf8
  16. pid-file = /usr/local/mysql/mysqld.pid
  17. socket=/usr/local/mysql/mysql.sock
  18. bind-address = 0.0.0.0
  19. skip-name-resolve
  20. max_connections=2048
  21. default-storage-engine=INNODB
  22. max_allowed_packet=16M
  23. server-id = 1
  24. sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_AUTO_VALUE_ON_ZERO,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,PIPES_AS_CONCAT,ANSI_QUOTES

8 做个定时计划 

 9 修改 Master01、Slave01、Slave02 节点的 Mysql主配置文件/etc/my.cnf

10 在 Master01、Slave01、Slave02 节点上都创建两个软链接

  1. ln -s /usr/local/mysql/bin/mysql /usr/sbin/
  2. ln -s /usr/local/mysql/bin/mysqlbinlog /usr/sbin/
  3. ls /usr/sbin/mysql* #查看软连接
  4. /usr/sbin/mysql /usr/sbin/mysqlbinlog

11 在 Master、Slave1、Slave2 节点上都进行主从同步的授权

  1. grant replication slave on *.* to 'myslave'@'192.168.246.%' identified by '123123';
  2. grant all privileges on *.* to 'mha'@'192.168.246.%' identified by 'manager';
  3. grant all privileges on *.* to 'mha'@'master' identified by 'manager';
  4. grant all privileges on *.* to 'mha'@'slave1' identified by 'manager';
  5. grant all privileges on *.* to 'mha'@'slave2' identified by 'manager';
  6. flush privileges;

主从同时登陆

给从服务器授权slave01 服务器配置 

验证:在主服务器创建 

  1. mysql> flush privilieges;
  2. ERROR 1064 (42000): You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'privilieges' at line 1
  3. mysql> flush privileges;
  4. Query OK, 0 rows affected (0.01 sec)
  5. mysql> show master status;
  6. +-------------------+----------+--------------+------------------+-------------------+
  7. | File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
  8. +-------------------+----------+--------------+------------------+-------------------+
  9. | master-bin.000001 | 603 | | | |
  10. +-------------------+----------+--------------+------------------+-------------------+
  11. 1 row in set (0.00 sec)
  12. mysql> create database mcb;
  13. Query OK, 1 row affected (0.01 sec)
  14. mysql> show databases;
  15. +--------------------+
  16. | Database |
  17. +--------------------+
  18. | information_schema |
  19. | mcb |
  20. | mysql |
  21. | performance_schema |
  22. | sys |
  23. +--------------------+
  24. 5 rows in set (0.01 sec)
  25. mysql>

从服务器验证

 12  配置MHA所有组件,所有服务器上都安装 MHA 依赖的环境,首先安装 epel 源

  1. yum install epel-release --nogpgcheck -y
  2. yum install -y perl-DBD-MySQL \
  3. > perl-Config-Tiny \
  4. > perl-Log-Dispatch \
  5. > perl-Parallel-ForkManager \
  6. > perl-ExtUtils-CBuilder \
  7. > perl-ExtUtils-MakeMaker \
  8. > perl-CPAN

13 四台服务器安装环境

14 在所有服务器上必须先安装node组件,最后在MHA-manager节点上安装manager组件,因为manager依赖node组件。 

在 MHA manager 节点上安装 manager 组件

manege免密交互 

  1. [root@elk01 mha4mysql-manager-0.57]#ssh-keygen -t rsa
  2. Generating public/private rsa key pair.
  3. Enter file in which to save the key (/root/.ssh/id_rsa):
  4. Created directory '/root/.ssh'.
  5. Enter passphrase (empty for no passphrase):
  6. Enter same passphrase again:
  7. Your identification has been saved in /root/.ssh/id_rsa.
  8. Your public key has been saved in /root/.ssh/id_rsa.pub.
  9. The key fingerprint is:
  10. SHA256:HvUbXKX4fSndq2yADEyKmlwWJ+iJlHOVwma49qdcGKY root@elk01
  11. The key's randomart image is:
  12. +---[RSA 2048]----+
  13. | = ... .|
  14. | * O o . . o |
  15. |+ B * + . . o |
  16. |.= * . o . o o..o|
  17. |o O o S . +..o+|
  18. | E o o . + . o. o|
  19. | . + . o . |
  20. | o ... |
  21. | .o |
  22. +----[SHA256]-----+
  23. [root@elk01 mha4mysql-manager-0.57]#ssh-copy-id 192.168.11.7
  24. /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
  25. The authenticity of host '192.168.11.7 (192.168.11.7)' can't be established.
  26. ECDSA key fingerprint is SHA256:uQfWnfl20Yj/iVllTVL3GAe3b5oPUj7IkhfWji2tF4Y.
  27. ECDSA key fingerprint is MD5:23:93:1c:28:77:cc:64:8c:b6:fb:4a:c2:90:9c:b5:1a.
  28. Are you sure you want to continue connecting (yes/no)? yes
  29. /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
  30. /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
  31. root@192.168.11.7's password:
  32. Number of key(s) added: 1
  33. Now try logging into the machine, with: "ssh '192.168.11.7'"
  34. and check to make sure that only the key(s) you wanted were added.
  35. [root@elk01 mha4mysql-manager-0.57]#ssh-copy-id 192.168.11.13
  36. /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
  37. The authenticity of host '192.168.11.13 (192.168.11.13)' can't be established.
  38. ECDSA key fingerprint is SHA256:yxbaJImj8mJsF3SNpt1dlUq4RCnL5sn8R7NJNBhCQIs.
  39. ECDSA key fingerprint is MD5:8b:67:9d:ff:25:ae:d2:81:f0:a0:ca:f6:af:ef:31:b1.
  40. Are you sure you want to continue connecting (yes/no)? yes
  41. /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
  42. /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
  43. root@192.168.11.13's password:
  44. Number of key(s) added: 1
  45. Now try logging into the machine, with: "ssh '192.168.11.13'"
  46. and check to make sure that only the key(s) you wanted were added.
  47. [root@elk01 mha4mysql-manager-0.57]#ssh-copy-id 192.168.11.14
  48. /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
  49. The authenticity of host '192.168.11.14 (192.168.11.14)' can't be established.
  50. ECDSA key fingerprint is SHA256:JAQ3v9JIlkv3lauqQxhRmSga7GPl5zIOv0THdDWT1TU.
  51. ECDSA key fingerprint is MD5:d3:64:b1:26:6c:a5:f3:50:38:b2:db:ab:07:67:fe:00.
  52. Are you sure you want to continue connecting (yes/no)? yes
  53. /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
  54. /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
  55. root@192.168.11.14's password:
  56. Number of key(s) added: 1
  57. Now try logging into the machine, with: "ssh '192.168.11.14'"
  58. and check to make sure that only the key(s) you wanted were added.

主服务器做免密

  1. [root@master01 mha4mysql-node-0.57]# ssh-keygen -t rsa
  2. Generating public/private rsa key pair.
  3. Enter file in which to save the key (/root/.ssh/id_rsa):
  4. Enter passphrase (empty for no passphrase):
  5. Enter same passphrase again:
  6. Your identification has been saved in /root/.ssh/id_rsa.
  7. Your public key has been saved in /root/.ssh/id_rsa.pub.
  8. The key fingerprint is:
  9. SHA256:vAZw2+fhzyIb49b+VAl10Vg7mUw2BY5rJGU5EuIgEwo root@master01
  10. The key's randomart image is:
  11. +---[RSA 2048]----+
  12. |E +.. . ..o.oOB|
  13. | . . o o ..oo+=.*|
  14. | . . . . ..+..* |
  15. | o + o o ..|
  16. | o S o o o |
  17. | . = o . |
  18. | =.o . |
  19. | oooo+ |
  20. | .o+.o+ |
  21. +----[SHA256]-----+
  22. [root@master01 mha4mysql-node-0.57]# ssh-copy-id 192.168.11.13
  23. /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
  24. The authenticity of host '192.168.11.13 (192.168.11.13)' can't be established.
  25. ECDSA key fingerprint is SHA256:yxbaJImj8mJsF3SNpt1dlUq4RCnL5sn8R7NJNBhCQIs.
  26. ECDSA key fingerprint is MD5:8b:67:9d:ff:25:ae:d2:81:f0:a0:ca:f6:af:ef:31:b1.
  27. Are you sure you want to continue connecting (yes/no)? yes
  28. /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
  29. /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
  30. root@192.168.11.13's password:
  31. Number of key(s) added: 1
  32. Now try logging into the machine, with: "ssh '192.168.11.13'"
  33. and check to make sure that only the key(s) you wanted were added.
  34. [root@master01 mha4mysql-node-0.57]# ssh-copy-id 192.168.11.14
  35. /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
  36. The authenticity of host '192.168.11.14 (192.168.11.14)' can't be established.
  37. ECDSA key fingerprint is SHA256:JAQ3v9JIlkv3lauqQxhRmSga7GPl5zIOv0THdDWT1TU.
  38. ECDSA key fingerprint is MD5:d3:64:b1:26:6c:a5:f3:50:38:b2:db:ab:07:67:fe:00.
  39. Are you sure you want to continue connecting (yes/no)? yes
  40. /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
  41. /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
  42. root@192.168.11.14's password:
  43. Number of key(s) added: 1
  44. Now try logging into the machine, with: "ssh '192.168.11.14'"
  45. and check to make sure that only the key(s) you wanted were added.

从1服务器做免密

  1. [root@slave01 mha4mysql-node-0.57]# ssh-keygen -t rsa
  2. Generating public/private rsa key pair.
  3. Enter file in which to save the key (/root/.ssh/id_rsa):
  4. Enter passphrase (empty for no passphrase):
  5. Enter same passphrase again:
  6. Your identification has been saved in /root/.ssh/id_rsa.
  7. Your public key has been saved in /root/.ssh/id_rsa.pub.
  8. The key fingerprint is:
  9. SHA256:yZwFzSDAbfdK2ybyqwU2VwfLx4EcK4bhN6I2t6mhsMw root@slave01
  10. The key's randomart image is:
  11. +---[RSA 2048]----+
  12. | ..oo o=oo. |
  13. | ..o+.o== . |
  14. | .+.=.* + |
  15. | . *.B.o |
  16. | + =.S+ |
  17. | . +.*+ o |
  18. |. . oo.o |
  19. |oo . o .. |
  20. |.E. . .... |
  21. +----[SHA256]-----+
  22. 您在 /var/spool/mail/root 中有新邮件
  23. [root@slave01 mha4mysql-node-0.57]# ssh-copy-id 192.168.11.14
  24. /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
  25. The authenticity of host '192.168.11.14 (192.168.11.14)' can't be established.
  26. ECDSA key fingerprint is SHA256:JAQ3v9JIlkv3lauqQxhRmSga7GPl5zIOv0THdDWT1TU.
  27. ECDSA key fingerprint is MD5:d3:64:b1:26:6c:a5:f3:50:38:b2:db:ab:07:67:fe:00.
  28. Are you sure you want to continue connecting (yes/no)? yes
  29. /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
  30. /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
  31. root@192.168.11.14's password:
  32. Number of key(s) added: 1
  33. Now try logging into the machine, with: "ssh '192.168.11.14'"
  34. and check to make sure that only the key(s) you wanted were added.
  35. 您在 /var/spool/mail/root 中有新邮件
  36. [root@slave01 mha4mysql-node-0.57]# ssh-copy-id 192.168.11.7
  37. /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
  38. The authenticity of host '192.168.11.7 (192.168.11.7)' can't be established.
  39. ECDSA key fingerprint is SHA256:uQfWnfl20Yj/iVllTVL3GAe3b5oPUj7IkhfWji2tF4Y.
  40. ECDSA key fingerprint is MD5:23:93:1c:28:77:cc:64:8c:b6:fb:4a:c2:90:9c:b5:1a.
  41. Are you sure you want to continue connecting (yes/no)? yes
  42. /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
  43. /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
  44. root@192.168.11.7's password:
  45. Number of key(s) added: 1
  46. Now try logging into the machine, with: "ssh '192.168.11.7'"
  47. and check to make sure that only the key(s) you wanted were added.

从2服务器免密 

  1. [root@slave02 mha4mysql-node-0.57]# ssh-keygen -t rsa
  2. Generating public/private rsa key pair.
  3. Enter file in which to save the key (/root/.ssh/id_rsa):
  4. Enter passphrase (empty for no passphrase):
  5. Enter same passphrase again:
  6. Your identification has been saved in /root/.ssh/id_rsa.
  7. Your public key has been saved in /root/.ssh/id_rsa.pub.
  8. The key fingerprint is:
  9. SHA256:fxWAkdGfr9KqphQ2b40ZUq/kys2thugv5jehXNycyMM root@slave02
  10. The key's randomart image is:
  11. +---[RSA 2048]----+
  12. | o*. |
  13. | o .. |
  14. | . ... |
  15. | . . o. |
  16. | +S+o.. .. |
  17. | .EO+* . .|
  18. | . +.+O o. . |
  19. | *o+=oo. o |
  20. | +o+===ooo |
  21. +----[SHA256]-----+
  22. 您在 /var/spool/mail/root 中有新邮件
  23. [root@slave02 mha4mysql-node-0.57]# ssh-copy-id 192.168.11.7
  24. /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
  25. The authenticity of host '192.168.11.7 (192.168.11.7)' can't be established.
  26. ECDSA key fingerprint is SHA256:uQfWnfl20Yj/iVllTVL3GAe3b5oPUj7IkhfWji2tF4Y.
  27. ECDSA key fingerprint is MD5:23:93:1c:28:77:cc:64:8c:b6:fb:4a:c2:90:9c:b5:1a.
  28. Are you sure you want to continue connecting (yes/no)? yes
  29. /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
  30. /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
  31. root@192.168.11.7's password:
  32. Number of key(s) added: 1
  33. Now try logging into the machine, with: "ssh '192.168.11.7'"
  34. and check to make sure that only the key(s) you wanted were added.
  35. [root@slave02 mha4mysql-node-0.57]# ssh-copy-id 192.168.11.13
  36. /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
  37. The authenticity of host '192.168.11.13 (192.168.11.13)' can't be established.
  38. ECDSA key fingerprint is SHA256:yxbaJImj8mJsF3SNpt1dlUq4RCnL5sn8R7NJNBhCQIs.
  39. ECDSA key fingerprint is MD5:8b:67:9d:ff:25:ae:d2:81:f0:a0:ca:f6:af:ef:31:b1.
  40. Are you sure you want to continue connecting (yes/no)? yes
  41. /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
  42. /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
  43. root@192.168.11.13's password:
  44. Number of key(s) added: 1
  45. Now try logging into the machine, with: "ssh '192.168.11.13'"
  46. and check to make sure that only the key(s) you wanted were added.

15在 manager 节点上配置 MHA

(1)在 manager 节点上复制相关脚本到/usr/local/bin 目录

  1. [root@elk01 mha4mysql-manager-0.57]#cp -rp /opt/mha4mysql-manager-0.57/samples/scripts /usr/local/bin
  2. [root@elk01 mha4mysql-manager-0.57]#ll /usr/local/bin/scripts/
  3. 总用量 32
  4. -rwxr-xr-x 1 1001 1001 3648 531 2015 master_ip_failover
  5. -rwxr-xr-x 1 1001 1001 9870 531 2015 master_ip_online_change
  6. -rwxr-xr-x 1 1001 1001 11867 531 2015 power_manager
  7. -rwxr-xr-x 1 1001 1001 1360 531 2015 send_report
  1. [root@elk01 mha4mysql-manager-0.57]#cp /usr/local/bin/scripts/master_ip_failover /usr/local/bin/
  2. [root@elk01 mha4mysql-manager-0.57]#ll /usr/local/bin
  3. 总用量 88
  4. -r-xr-xr-x 1 root root 16381 420 13:40 apply_diff_relay_logs
  5. -r-xr-xr-x 1 root root 4807 420 13:40 filter_mysqlbinlog
  6. -r-xr-xr-x 1 root root 1995 420 13:52 masterha_check_repl
  7. -r-xr-xr-x 1 root root 1779 420 13:52 masterha_check_ssh
  8. -r-xr-xr-x 1 root root 1865 420 13:52 masterha_check_status
  9. -r-xr-xr-x 1 root root 3201 420 13:52 masterha_conf_host
  10. -r-xr-xr-x 1 root root 2517 420 13:52 masterha_manager
  11. -r-xr-xr-x 1 root root 2165 420 13:52 masterha_master_monitor
  12. -r-xr-xr-x 1 root root 2373 420 13:52 masterha_master_switch
  13. -r-xr-xr-x 1 root root 5171 420 13:52 masterha_secondary_check
  14. -r-xr-xr-x 1 root root 1739 420 13:52 masterha_stop
  15. -rwxr-xr-x 1 root root 3648 420 14:13 master_ip_failover
  16. -r-xr-xr-x 1 root root 8261 420 13:40 purge_relay_logs
  17. -r-xr-xr-x 1 root root 7525 420 13:40 save_binary_logs
  18. drwxr-xr-x 2 1001 1001 103 531 2015 scripts

14 创建 MHA 软件目录并拷贝配置文件,这里使用app1.cnf配置文件来管理 mysql 节点服务器

  1. [root@elk01 mha4mysql-manager-0.57]#vim /usr/local/bin/master_ip_failover
  2. [root@elk01 mha4mysql-manager-0.57]#mkdir /etc/masterha
  3. [root@elk01 mha4mysql-manager-0.57]#vim /usr/local/bin/master_ip_failover
  4. [root@elk01 mha4mysql-manager-0.57]#cp /opt/mha4mysql-manager-0.57/samples/conf/app1.cnf /etc/masterha
  5. [root@elk01 mha4mysql-manager-0.57]#vim /etc/masterha/app1.cnf
  1. [server default]
  2. manager_log=/var/log/masterha/app1/manager.log
  3. manager_workdir=/var/log/masterha/app1
  4. master_binlog_dir=/usr/local/mysql/data
  5. master_ip_failover_script=/usr/local/bin/master_ip_failover
  6. master_ip_online_change_script=/usr/local/bin/master_ip_online_change
  7. password=abc123
  8. user=my
  9. ping_interval=1
  10. remote_workdir=/tmp
  11. repl_password=123
  12. repl_user=myslave
  13. secondary_check_script=/usr/local/bin/masterha_secondary_check -s 192.168.11.13 -s 192.168.11.14
  14. shutdown_script=""
  15. ssh_user=root
  16. [server1]
  17. hostname=192.168.11.7
  18. port=3306
  19. [server2]
  20. candidate_master=1
  21. check_repl_delay=0
  22. hostname=192.168.11.13
  23. port=3306
  24. [server3]
  25. hostname=192.168.11.14
  26. port=3306

15 第一次配置需要在 Master 节点上手动开启虚拟IP地址

  1. [root@master01 opt]# /sbin/ifconfig ens33:1 192.168.11.100/24
  2. [root@master01 opt]# ip a
  3. 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1

16 在 manager 节点上测试 ssh 无密码认证

在 manager 节点上测试 ssh 无密码认证,如果正常最后会输出 successfully,如下所示。

masterha_check_ssh -conf=/etc/masterha/app1.cnf

17 在 manager 节点上测试 mysql 主从连接情况

在 manager 节点上测试 mysql 主从连接情况,最后出现MySQL Replication Health is OK 字样说明正常。如下所示。

输入它查看情况

[root@elk01 mha4mysql-manager-0.57]#masterha_check_repl -conf=/etc/masterha/app1.cnf

在 manager 节点上启动 MHA 

nohup masterha_manager --conf=/etc/masterha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/masterha/app1/manager.log 2>&1 &
  1. [root@elk01 mha4mysql-manager-0.57]#nohup masterha_manager --conf=/etc/masterha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/masterha/app1/manager.log 2>&1 &
  2. [1] 10192
  3. [root@elk01 mha4mysql-manager-0.57]#ps -aux|grep manager
  4. root 10192 0.5 0.5 297380 21752 pts/0 S 17:20 0:00 perl /usr/local/bin/masterha_manager --conf=/etc/masterha/app1.cnf --remove_dead_master_conf --ignore_last_failover
  5. root 10295 0.0 0.0 112824 984 pts/0 S+ 17:21 0:00 grep --color=auto manager

在 manager 节点上查看 MHA 状态 和 MHA 日志,可以看到 master的地址

  1. masterha_check_status --conf=/etc/masterha/app1.cnf
  1. [root@elk01 mha4mysql-manager-0.57]#masterha_check_status --conf=/etc/masterha/app1.cnf
  2. app1 (pid:10192) is running(0:PING_OK), master:192.168.11.7
  3. [root@elk01 mha4mysql-manager-0.57]#cat /var/log/masterha/app1/manager.log | grep "current master"
  4. Sat Apr 20 17:20:56 2024 - [info] Checking SSH publickey authentication settings on the current master..
  5. 192.168.11.7(192.168.11.7:3306) (current master)

查看master 的 VIP 地址 192.168.11.100 是否存在

这个 VIP 地址不会因为 manager 节点停止 MHA 服务而消失。

故障模拟

#在 manager 节点上监控观察日志记录

tail -f /var/log/masterha/app1/manager.log

同时去Master 节点 master 上停止mysql服务

再去manager服务器上的日志查看

查看 slave1 是否接管 VIP,使用ifconfig 

正常自动切换一次后,MHA 进程会退出。HMA 会自动修改 app1.cnf 文件内容,将宕机的 master 节点删除。查看 slave1 是否接管 VIP

故障修复

修复原来的master(即修复原来的主节点)

修复主从

在新的主库服务器 slave1 中查看二进制日志文件和同步点

show master status;

在原主库服务器 master执行同步操作,同步现在主库中的数据

change master to master_host='192.168.11.13',master_user='myslave',master_password='123',master_log_file='master-bin.000005',master_log_pos=154;

在 manager 节点上修改配置文件app1.cnf

重新把三台mysql节点服务器这个记录添加进去,因为它检测到主节点失效时候会自动删除主节点

将slave01添加为新的候选master01

在 manager 节点上启动 MHA

 nohup masterha_manager --conf=/etc/masterha/app1.cnf --remove_dead_master_conf --ignore_last_failover < /dev/null > /var/log/masterha/app1/manager.log 2>&1 &

安装 tomcat(二进制包方式):192.168.11.14

  1. #使用二进制安装jdk
  2. [root@slave02 ~]# mkdir /data
  3. [root@slave02 ~]# cd /data
  4. [root@slave02 data]# rz -E
  5. rz waiting to receive.
  6. [root@slave02 data]# rz -E
  7. rz waiting to receive.
  8. [root@slave02 data]# ls
  9. apache-tomcat-9.0.16.tar.gz jdk-8u291-linux-x64.tar.gz
  10. [root@slave02 data]# tar jdk-8u291-linux-x64.tar.gz
  11. tar: 旧选项“g”需要参数。
  12. 请用“tar --help”或“tar --usage”获得更多信息。
  13. [root@slave02 data]# tar xf jdk-8u291-linux-x64.tar.gz
  14. [root@slave02 data]# ls
  15. apache-tomcat-9.0.16.tar.gz jdk1.8.0_291 jdk-8u291-linux-x64.tar.gz
  16. [root@slave02 data]# rm -rf jdk1.8.0_291/
  17. [root@slave02 data]# tar xf jdk-8u291-linux-x64.tar.gz -C /usr/local
  18. [root@slave02 data]# ls /usr/local
  19. bin boost_1_59_0 games jdk1.8.0_291 lib64 mysql share
  20. boost etc include lib libexec sbin src
  21. [root@slave02 data]# cd /usr/local
  22. [root@slave02 local]# ln -s jdk1.8.0_291/ jdk
  23. [root@slave02 local]# ls
  24. bin boost_1_59_0 games jdk lib libexec sbin src
  25. boost etc include jdk1.8.0_291 lib64 mysql share
  26. [root@slave02 local]# ll
  27. 总用量 4
  28. drwxr-xr-x. 2 root root 6 115 2016 bin
  29. lrwxrwxrwx. 1 root root 13 419 23:49 boost -> boost_1_59_0/
  30. drwx------. 8 501 games 4096 812 2015 boost_1_59_0
  31. drwxr-xr-x. 2 root root 6 115 2016 etc
  32. drwxr-xr-x. 2 root root 6 115 2016 games
  33. drwxr-xr-x. 2 root root 6 115 2016 include
  34. lrwxrwxrwx. 1 root root 13 420 08:57 jdk -> jdk1.8.0_291/ #软连接显示
  35. drwxr-xr-x. 8 10143 10143 273 48 2021 jdk1.8.0_291
  36. drwxr-xr-x. 2 root root 6 115 2016 lib
  37. drwxr-xr-x. 2 root root 6 115 2016 lib64
  38. drwxr-xr-x. 2 root root 6 115 2016 libexec
  39. drwxr-xr-x. 12 mysql mysql 229 420 00:09 mysql
  40. drwxr-xr-x. 2 root root 6 115 2016 sbin
  41. drwxr-xr-x. 5 root root 49 315 19:21 share
  42. drwxr-xr-x. 2 root root 6 115 2016 src
  43. [root@slave02 local]# vim /etc profile.d/env.sh
  44. 还有 2 个文件等待编辑
  45. [root@slave02 local]# vim /etc/profile.d/env.sh
  46. [root@slave02 local]# cat /etc/profile.d/env.sh
  47. # java home
  48. export JAVA_HOME=/usr/local/jdk #切记路径要一致,否则会报错
  49. export PATH=$JAVA_HOME/bin:$PATH
  50. export JRE_HOME=$JAVA_HOME/jre
  51. export CLASSPATH=$JAVA_HOME/lib/:$JRE_HOME/lib/
  52. [root@slave02 local]# java -version
  53. java version "1.8.0_291"
  54. Java(TM) SE Runtime Environment (build 1.8.0_291-b10)
  55. Java HotSpot(TM) 64-Bit Server VM (build 25.291-b10, mixed mode)
  56. [root@slave02 data]# ls
  57. apache-tomcat-9.0.16.tar.gz jdk-8u291-linux-x64.tar.gz
  58. [root@slave02 data]# cp -r apache-tomcat-9.0.16.tar.gz /usr/local
  59. [root@slave02 data]# cd /usr/local
  60. [root@slave02 local]# ls
  61. apache-tomcat-9.0.16.tar.gz boost_1_59_0 include lib mysql src
  62. bin etc jdk lib64 sbin
  63. boost games jdk1.8.0_291 libexec share
  64. [root@slave02 local]# tar xf apache-tomcat-9.0.16.tar.gz
  65. [root@slave02 local]# ls
  66. apache-tomcat-9.0.16 boost games jdk1.8.0_291 libexec share
  67. apache-tomcat-9.0.16.tar.gz boost_1_59_0 include lib mysql src
  68. bin etc jdk lib64 sbin
  69. [root@slave02 local]# ln -s apache-tomcat-9.0.16/ tomcat
  70. [root@slave02 local]# cd tomcat/
  71. [root@slave02 tomcat]# ls
  72. bin conf lib logs README.md RUNNING.txt webapps
  73. BUILDING.txt CONTRIBUTING.md LICENSE NOTICE RELEASE-NOTES temp work
  74. 新建用户tomcat 并修改其属主和属组,权限
  75. [root@slave02 tomcat]# useradd tomcat -s /sbin/nologin -M
  76. [root@slave02 tomcat]# chown tomcat:tomcat /usr/local/tomcat/ -R
  77. [root@slave02 tomcat]# cat > /usr/lib/systemd/system/tomcat.service <<EOF
  78. > [Unit]
  79. > Description=Tomcat
  80. > After=syslog.target network.target
  81. >
  82. > [Service]
  83. > Type=forking
  84. > ExecStart=/usr/local/tomcat/bin/startup.sh
  85. > ExecStop=/usr/local/tomcat/bin/shutdown.sh
  86. > RestartSec=3
  87. > PrivateTmp=true
  88. > User=tomcat
  89. > Group=tomcat
  90. >
  91. > [Install]
  92. > WantedBy=multi-user.target
  93. > EOF
  94. [root@slave02 tomcat]# systemctl start tomcat
  95. [root@slave02 tomcat]# systemctl status tomcat
  96. ● tomcat.service - Tomcat
  97. Loaded: loaded (/usr/lib/systemd/system/tomcat.service; disabled; vendor preset: disabled)
  98. Active: active (running) since 六 2024-04-20 09:35:19 CST; 6s ago
  99. Process: 34542 ExecStart=/usr/local/tomcat/bin/startup.sh (code=exited, status=0/SUCCESS)
  100. Main PID: 34557 (catalina.sh)
  101. CGroup: /system.slice/tomcat.service
  102. ├─34557 /bin/sh /usr/local/tomcat/bin/catalina.sh start
  103. └─34558 /usr/bin/java -Djava.util.logging.config.file=/usr/local/tomcat/conf...
  104. 420 09:35:19 slave02 systemd[1]: Starting Tomcat...
  105. 420 09:35:19 slave02 startup.sh[34542]: Using CATALINA_BASE: /usr/local/tomcat
  106. 420 09:35:19 slave02 startup.sh[34542]: Using CATALINA_HOME: /usr/local/tomcat
  107. 420 09:35:19 slave02 startup.sh[34542]: Using CATALINA_TMPDIR: /usr/local/tomcat/temp
  108. 420 09:35:19 slave02 startup.sh[34542]: Using JRE_HOME: /usr
  109. 420 09:35:19 slave02 startup.sh[34542]: Using CLASSPATH: /usr/local/tomcat/...ar
  110. 420 09:35:19 slave02 systemd[1]: Started Tomcat.
  111. Hint: Some lines were ellipsized, use -l to show in full.
  112. [root@slave02 tomcat]# systemctl enable tomcat
  113. Created symlink from /etc/systemd/system/multi-user.target.wants/tomcat.service to /usr/lib/systemd/system/tomcat.service.
  114. [root@slave02 tomcat]# pstree -p | grep tomcat
  115. [root@slave02 tomcat]# pstree -p | grep java
  116. |-catalina.sh(34557)---java(34558)-+-{java}(34559)
  117. | |-{java}(34560)
  118. | |-{java}(34561)
  119. | |-{java}(34562)
  120. |
  121. [root@slave02 tomcat]# ps aux
  122. USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
  123. root 1 0.1 0.1 193680 5076 ? Ss 08:05 0:07 /usr/lib/systemd/systemd
  124. root 2 0.0 0.0 0 0 ? S 08:05 0:00 [kthreadd]
  125. root 34510 0.0 0.0 0 0 ? S 09:32 0:00 [kworker/12:0]
  126. tomcat 34557 0.0 0.0 113408 680 ? S 09:35 0:00 /bin/sh /usr/local/tomca
  127. tomcat 34558 3.6 3.3 6872068 127728 ? Sl 09:35 0:07 /usr/bin/java -Djava.uti
  128. root 34680 0.0 0.0 108052 352 ? S 09:38 0:00 sleep 60
  129. root 34681 0.0 0.0 151212 1844 pts/2 R+ 09:38 0:00 ps aux
  130. [root@slave02 tomcat]#

安装 keepalived(yum 方式):192.168.11.11

  1. [root@mcb-11 ~]# yum install keepalived.x86_64 -y
  2. 已加载插件:fastestmirror, langpacks
  3. [root@mcb-11 ~]# cd /etc/keepalived/
  4. [root@mcb-11 keepalived]# ls
  5. keepalived.conf
  6. [root@mcb-11 keepalived]# cp keepalived.conf keepalived.conf.bak
  7. [root@mcb-11 keepalived]# ls
  8. keepalived.conf keepalived.conf.bak
  9. [root@mcb-11 keepalived]# vim keepalived.conf

三 做 ELK:192.168.11.9

  1. 做主机名映射
  2. [root@elk01 ~]#echo "192.168.11.9 elk01" >> /etc/hosts
  3. 安装 ElasticSearch-rpm包
  4. [root@elk01 ~]#cd /opt
  5. [root@elk01 opt]#ls
  6. mha4mysql-manager-0.57 perl-Config-Tiny perl-Log-Dispatch
  7. mha4mysql-manager-0.57.tar.gz perl-CPAN perl-Parallel-ForkManager
  8. mha4mysql-node-0.57 perl-ExtUtils-CBuilder rh
  9. mha4mysql-node-0.57.tar.gz perl-ExtUtils-MakeMaker
  10. [root@elk01 opt]#rz -E
  11. rz waiting to receive.
  12. [root@elk01 opt]#rpm -ivh elasticsearch-5.5.0.rpm
  13. 警告:elasticsearch-5.5.0.rpm: 头V4 RSA/SHA512 Signature, 密钥 ID d88e42b4: NOKEY
  14. 准备中... ################################# [100%]
  15. Creating elasticsearch group... OK
  16. Creating elasticsearch user... OK
  17. 加载系统服务
  18. [root@elk01 opt]#systemctl daemon-reload
  19. [root@elk01 opt]#systemctl start elasticsearch.service
  20. [root@elk01 opt]#systemctl enable elasticsearch.service
  21. 修改 elasticsearch 主配置文件
  22. [root@elk01 opt]# cp /etc/elasticsearch/elasticsearch.yml /etc/elasticsearch/elasticsearch.yml.bak
  23. #备份配置文件
  24. [root@elk01 opt]# vim /etc/elasticsearch/elasticsearch.yml
  25. ##17行,取消注释,指定群集名称
  26. cluster.name: my-elk-cluster
  27. ##23行,取消注释,指定节点名称(与本机主机名一致,此处为 elk01)
  28. node.name: elk01
  29. ##33行,取消注释,指定数据存放路径
  30. path.data: /data/elk_data
  31. ##37行,取消注释,指定日志存放路径
  32. path.logs: /var/log/elasticsearch/
  33. ##43行,取消注释,不在启动的时候锁定内存(前端缓存,与IOPS-性能测试方式,每秒读写次数相关)
  34. bootstrap.memory_lock: false
  35. ##55行,取消注释,设置监听地址,0.0.0.0代表所有地址
  36. network.host: 0.0.0.0
  37. ##59行,取消注释,ES服务的默认监听端口为9200
  38. http.port: 9200
  39. ##68行,取消注释,集群发现通过单播实现,指定要发现的节点node1、node2
  40. discovery.zen.ping.unicast.hosts: ["elk01"]
  41. 查看我们修改的内容
  42. [root@elk01 elasticsearch]#vim /etc/elasticsearch/elasticsearch.yml
  43. [root@elk01 elasticsearch]#grep -v "^#" /etc/elasticsearch/elasticsearch.yml
  44. cluster.name: my-elk-cluster
  45. node.name: elk01
  46. path.data: /data/elk_data
  47. bootstrap.memory_lock: false
  48. network.host: 0.0.0.0
  49. http.port: 9200
  50. discovery.zen.ping.unicast.hosts: ["elk01"]
  51. 创建数据存放路径并授权
  52. [root@elk01 elasticsearch]# mkdir -p /data/elk_data
  53. [root@elk01 elasticsearch]#
  54. [root@elk01 elasticsearch]#chown elasticsearch:elasticsearch /data/elk_data/
  55. 启动 elasticsearch
  56. [root@elk01 elasticsearch]#systemctl start elasticsearch.service
  57. [root@elk01 elasticsearch]#netstat -natp | grep 9200
  58. tcp6 0 0 127.0.0.1:9200 :::* LISTEN 16953/java
  59. tcp6 0 0 ::1:9200 :::* LISTEN 16953/java
  60. 启动 elasticsearch-head 服务
  61. [root@elk01 elasticsearch]#systemctl start elasticsearch.service
  62. [root@elk01 elasticsearch]#netstat -natp | grep 9200
  63. tcp6 0 0 127.0.0.1:9200 :::* LISTEN 16953/java
  64. tcp6 0 0 ::1:9200 :::* LISTEN 16953/java

查看节点信息

报错原因一般时配置文件

浏览器访问 http://192.168.11.9:9200

 安装 Elasticsearch-head 插件

  1. [root@elk01 elasticsearch]#cd /opt
  2. [root@elk01 opt]#ls
  3. elasticsearch-5.5.0.rpm mha4mysql-node-0.57.tar.gz perl-ExtUtils-MakeMaker
  4. mha4mysql-manager-0.57 perl-Config-Tiny perl-Log-Dispatch
  5. mha4mysql-manager-0.57.tar.gz perl-CPAN perl-Parallel-ForkManager
  6. mha4mysql-node-0.57 perl-ExtUtils-CBuilder rh
  7. [root@elk01 opt]#yum install -y gcc gcc-c++ make
  8. [root@elk01 opt]#rz -E
  9. rz waiting to receive.
  10. [root@elk01 opt]#
  11. [root@elk01 opt]#tar xf node-v8.2.1.tar.gz
  12. [root@elk01 opt]#cd node-v8.2.1/
  13. [root@elk01 node-v8.2.1]#./configure
  14. creating ./icu_config.gypi
  15. [root@elk01 node-v8.2.1]#make -j 4 && make install #需要编译很长时间
  16. 安装phantomjs
  17. [root@elk01 node-v8.2.1]#rz -E
  18. rz waiting to receive.
  19. [root@elk01 node-v8.2.1]#rz -E
  20. rz waiting to receive.
  21. [root@elk01 node-v8.2.1]#cd /opt
  22. [root@elk01 opt]#ls
  23. elasticsearch-5.5.0.rpm node-v8.2.1 perl-ExtUtils-MakeMaker
  24. mha4mysql-manager-0.57 node-v8.2.1.tar.gz perl-Log-Dispatch
  25. mha4mysql-manager-0.57.tar.gz perl-Config-Tiny perl-Parallel-ForkManager
  26. mha4mysql-node-0.57 perl-CPAN rh
  27. mha4mysql-node-0.57.tar.gz perl-ExtUtils-CBuilder
  28. [root@elk01 opt]#cd ..
  29. [root@elk01 /]#cd ../
  30. [root@elk01 /]#cd ~
  31. [root@elk01 ~]# cd
  32. [root@elk01 ~]#cd /opt/node-v8.2.1/
  33. [root@elk01 node-v8.2.1]#ls
  34. android-configure configure node.gyp
  35. AUTHORS CONTRIBUTING.md node.gypi
  36. benchmark deps out
  37. BSDmakefile doc phantomjs-2.1.1-linux-x86_64.tar.bz2
  38. BUILDING.md elasticsearch-head.tar.gz README.md
  39. CHANGELOG.md GOVERNANCE.md src
  40. CODE_OF_CONDUCT.md icu_config.gypi test
  41. COLLABORATOR_GUIDE.md lib tools
  42. common.gypi LICENSE vcbuild.bat
  43. config.gypi Makefile
  44. config.mk node
  45. [root@elk01 node-v8.2.1]#mv elasticsearch-head.tar.gz phantomjs-2.1.1-linux-x86_64.tar.bz2 /opt
  46. [root@elk01 node-v8.2.1]#cd /opt
  47. [root@elk01 opt]#laa
  48. bash: laa: 未找到命令...
  49. [root@elk01 opt]#ls
  50. elasticsearch-5.5.0.rpm perl-Config-Tiny
  51. elasticsearch-head.tar.gz perl-CPAN
  52. mha4mysql-manager-0.57 perl-ExtUtils-CBuilder
  53. mha4mysql-manager-0.57.tar.gz perl-ExtUtils-MakeMaker
  54. mha4mysql-node-0.57 perl-Log-Dispatch
  55. mha4mysql-node-0.57.tar.gz perl-Parallel-ForkManager
  56. node-v8.2.1 phantomjs-2.1.1-linux-x86_64.tar.bz2
  57. node-v8.2.1.tar.gz rh
  58. [root@elk01 opt]#tar xf phantomjs-2.1.1-linux-x86_64.tar.bz2 -C /usr/local/src
  59. [root@elk01 opt]#cd /usr/local/src/phantomjs-2.1.1-linux-x86_64/
  60. [root@elk01 phantomjs-2.1.1-linux-x86_64]#ls
  61. bin ChangeLog examples LICENSE.BSD README.md third-party.txt
  62. [root@elk01 phantomjs-2.1.1-linux-x86_64]#cd bin
  63. [root@elk01 bin]#ls
  64. phantomjs
  65. [root@elk01 bin]#cp phantomjs /usr/local/bin
  66. 安装 Elasticsearch-head 数据可视化工具
  67. [root@elk01 bin]#cp phantomjs /usr/local/bin
  68. [root@elk01 bin]#cd /opt
  69. [root@elk01 opt]#ls
  70. elasticsearch-5.5.0.rpm perl-Config-Tiny
  71. elasticsearch-head.tar.gz perl-CPAN
  72. mha4mysql-manager-0.57 perl-ExtUtils-CBuilder
  73. mha4mysql-manager-0.57.tar.gz perl-ExtUtils-MakeMaker
  74. mha4mysql-node-0.57 perl-Log-Dispatch
  75. mha4mysql-node-0.57.tar.gz perl-Parallel-ForkManager
  76. node-v8.2.1 phantomjs-2.1.1-linux-x86_64.tar.bz2
  77. node-v8.2.1.tar.gz rh
  78. [root@elk01 opt]#tar xf elasticsearch-head.tar.gz -C /usr/local/src
  79. [root@elk01 opt]#cd /usr/local/src
  80. [root@elk01 src]#cd elasticsearch-head/
  81. [root@elk01 elasticsearch-head]#npm install
  82. npm WARN deprecated fsevents@1.2.13: The v1 package contains DANGEROUS / INSECURE binaries. Upgrade to safe fsevents v2
  83. npm WARN optional SKIPPING OPTIONAL DEPENDENCY: fsevents@^1.0.0 (node_modules/karma/node_modules/chokidar/node_modules/fsevents):
  84. npm WARN notsup SKIPPING OPTIONAL DEPENDENCY: Unsupported platform for fsevents@1.2.13: wanted {"os":"darwin","arch":"any"} (current: {"os":"linux","arch":"x64"})
  85. npm WARN elasticsearch-head@0.0.0 license should be a valid SPDX license expression
  86. up to date in 6.331s
  87. 修改 Elasticsearch 主配置文件
  88. [root@node1 elasticsearch-head]# vim /etc/elasticsearch/elasticsearch.yml
  89. ##末行添加以下内容
  90. http.cors.enabled: true ##开启跨域访问支持,默认为false
  91. http.cors.allow-origin: "*" ##指定跨域访问允许的域名地址为所有
  92. [root@ekl elasticsearch-head]# systemctl restart elasticsearch.service
  93. [root@elk elasticsearch-head]# netstat -antp | grep 9200
  94. 启动 elasticsearch-head 服务
  95. [root@elk01 elasticsearch-head]#npm run start &
  96. [2] 70435
  97. [root@elk01 elasticsearch-head]#
  98. > elasticsearch-head@0.0.0 start /usr/local/src/elasticsearch-head
  99. > grunt server
  100. Running "connect:server" (connect) task
  101. Waiting forever...
  102. Started connect web server on http://localhost:9100
  103. #此时去浏览器去访问
  104. 通过 Elasticsearch-head 查看 ES 信息

安装Kibana  192.168.11.9

  1. [root@elk01 opt]#rz -E
  2. rz waiting to receive.
  3. [root@elk01 opt]#rpm -ivh kibana-5.5.1-x86_64.rpm
  4. 警告:kibana-5.5.1-x86_64.rpm: 头V4 RSA/SHA512 Signature, 密钥 ID d88e42b4: NOKEY
  5. 准备中... ################################# [100%]
  6. 正在升级/安装...
  7. 1:kibana-5.5.1-1 ################################# [100%]
  8. [root@elk01 opt]#cp /etc/kibana/kibana.yml /etc/kibana/kibana.yml.bar
  9. [root@elk01 opt]#vim /etc/kibana/kibana.yml
  10. [root@elk01 opt]#systemctl start kibana.service
  11. [root@elk01 opt]#systemctl enable kibana.service
  12. Created symlink from /etc/systemd/system/multi-user.target.wants/kibana.service to /etc/systemd/system/kibana.service.
  13. [root@elk01 opt]#netstat -natp | grep 5601
  14. tcp 0 0 0.0.0.0:5601 0.0.0.0:* LISTEN 72781/node

 安装 filebeat 192.168.11.4

 

安装 logstash 

  1. [root@slave02 filebeat]# cd /opt
  2. [root@slave02 opt]# rz -E
  3. rz waiting to receive.
  4. [root@slave02 opt]# rpm -ivh logstash-5.5.1.rpm
  5. 警告:logstash-5.5.1.rpm: 头V4 RSA/SHA512 Signature, 密钥 ID d88e42b4: NOKEY
  6. 准备中... ################################# [100%]
  7. 正在升级/安装...
  8. 1:logstash-1:5.5.1-1 ################################# [100%]
  9. Using provided startup.options file: /etc/logstash/startup.options
  10. Successfully created system startup script for Logstash
  11. 您在 /var/spool/mail/root 中有新邮件
  12. [root@slave02 opt]# ln -s /usr/share/logstash/bin/logstash /usr/local/bin/
  13. [root@slave02 opt]# systemctl start logstash.service
  14. [root@slave02 opt]# systemctl enable logstash.service
  15. Created symlink from /etc/systemd/system/multi-user.target.wants/logstash.service to /etc/systemd/system/logstash.service.
  16. logstash -e 'input { stdin{} } output { stdout{} }'

 测试

四 查看 nginx_access 日志

192.168.11.9    es    head    kibana
192.168.11.12   logstash
192.168.11.14   logstash

1 先开启es 

2 在nginx_access 修改配置文件

  1. cd /etc/logstash/conf.d/
  2. ls
  3. vim nginx_log.conf
  4. systemctl restart logstash
  5. systemctl status logstash
  6. ls
  7. logstash -f nginx_log.conf
  1. input {
  2. file {
  3. path => "/apps/nginx/logs/access.log"
  4. type => "access"
  5. start_position => "beginning"
  6. }
  7. file {
  8. path => "/apps/nginx/logs/error.log"
  9. type => "error"
  10. start_position => "beginning"
  11. }
  12. }
  13. output {
  14. if [type] == "access" {
  15. elasticsearch {
  16. hosts => ["192.168.11.9:9200"]
  17. index => "nginx_access-%{+YYYY.MM.dd}"
  18. }
  19. }
  20. if [type] == "error" {
  21. elasticsearch {
  22. hosts => ["192.168.11.9:9200"]
  23. index => "nginx_error-%{+YYYY.MM.dd}"
  24. }
  25. }
  26. }

3 重启服务 

 

 五 查看mysql_error 日志

1先开启es    head

  1. [root@slave02 conf.d]# cat mysql_log.conf
  2. input {
  3. file {
  4. path => "/usr/local/mysql/data/error.log"
  5. start_position => "beginning"
  6. sincedb_path => "/dev/null"
  7. type => "mysql_error"
  8. }
  9. }
  10. output {
  11. elasticsearch {
  12. hosts => ["192.168.11.9:9200"]
  13. index => "mysql.error-%{+YYYY.MM.dd}"
  14. }
  15. }

[root@slave02 conf.d]# logstash -f mysql_log.conf

2 编辑 MySQL 上的 logstash 配置文本 

  1. [root@slave02 ROOT]# cd /etc/logstash/
  2. [root@slave02 logstash]# vim mysql_error.conf
  3. [root@slave02 logstash]# systemctl restart mysql
  4. [root@slave02 conf.d]# logstash -f mysql_error.conf

六 两个域名访问同页面

192.168.11.11   ha01  nginx
192.168.11.12   ha02  nginx
浏览器

1 在192.168.11.11 添加 

  1. [root@ha01 ~]# cd /apps/nginx/
  2. [root@ha01 nginx]# ls
  3. client_body_temp fastcgi_temp logs sbin uwsgi_temp
  4. conf html proxy_temp scgi_temp
  5. [root@ha01 nginx]# cd conf/
  6. [root@ha01 conf]# vim nginx.conf

  1. [root@ha01 conf]# cd /opt/html/
  2. [root@ha01 html]# ls
  3. index.html
  4. [root@ha01 html]# cat index.html
  5. ACB
  6. [root@ha01 html]# cat /etc/host
  7. cat: /etc/host: 没有那个文件或目录
  8. [root@ha01 html]# cat /etc/hosts
  9. 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
  10. ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
  11. 192.168.11.11 www.benet.com
  12. 192.168.11.12 www.kgc.com

 2 去 192.168.11.12 添加

  1. [root@ha02 opt]# cd html/
  2. [root@ha02 html]# ls
  3. [root@ha02 html]# cat /apps/nginx/conf/nginx.conf

[root@ha02 conf.d]# vim /etc/hosts

  1. [root@ha02 opt]# cd html/
  2. [root@ha02 html]# ls
  3. index.html
  4. [root@ha02 html]# cat index.html
  5. ABC

给真机添加该访问的域名 

 

 

 

3 去浏览器检测: 

需要有个缓存时间

 

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/Cpp五条/article/detail/474396?site
推荐阅读
相关标签
  

闽ICP备14008679号