当前位置:   article > 正文

2023云计算国赛样题梳理_2023全国技能大赛云计算应用赛项 私有云 容器云 公有云 答案

2023全国技能大赛云计算应用赛项 私有云 容器云 公有云 答案

typora-root

模块三 公有云(40)

任务1 公有云服务搭建(5分)

3.1.1 私有网络管理

在公有云中完成虚拟私有云网络的创建。

3.1.2 云实例管理

登录公有云平台,创建两台云实例虚拟机。
3.1.3 管理数据库

使用intnetX-mysql网络创建两台chinaskill-sql-1和chinaskill-sql-2云服务器,并完成MongoDB安装。

  1. ##创建云服务器与上一步一样
  2. ##node1与node2进行一样的操作
  3. [root@node1 ~]# vi /etc/yum.repos.d/
  4. CentOS-Base.repo CentOS-Debuginfo.repo CentOS-Media.repo CentOS-Vault.repo epel.repo.rpmnew
  5. CentOS-CR.repo CentOS-fasttrack.repo CentOS-Sources.repo epel.repo epel-testing.repo
  6. [root@node1 ~]# vi /etc/yum.repos.d/mongodb-org-4.0.repo
  7. [root@node1 ~]# cat /etc/yum.repos.d/mongodb-org-4.0.repo
  8. [mongodb-org]
  9. name=MongoDB Repository
  10. baseurl=http://mirrors.aliyun.com/mongodb/yum/redhat/7Server/mongodb-org/4.0/x86_64/
  11. gpgcheck=0
  12. enabled=1
  13. [root@node1 ~]# yum install mongodb-org -y
  14. [root@node1 ~]# systemctl start mongod
  15. [root@node1 ~]# systemctl enable mongod
  16. [root@node1 ~]# systemctl status mongod
  17. ● mongod.service - MongoDB Database Server
  18. Loaded: loaded (/usr/lib/systemd/system/mongod.service; enabled; vendor preset: disabled)
  19. Active: active (running) since Thu 2023-04-27 10:26:16 CST; 12s ago
  20. Docs: https://docs.mongodb.org/manual
  21. Main PID: 8365 (mongod)
  22. CGroup: /system.slice/mongod.service
  23. └─8365 /usr/bin/mongod -f /etc/mongod.conf
  24. Apr 27 10:26:15 node1 systemd[1]: Starting MongoDB Database Server...
  25. Apr 27 10:26:15 node1 mongod[8362]: about to fork child process, waiting until server is ready for connections.
  26. Apr 27 10:26:15 node1 mongod[8362]: forked process: 8365
  27. Apr 27 10:26:16 node1 mongod[8362]: child process started successfully, parent exiting
  28. Apr 27 10:26:16 node1 systemd[1]: Started MongoDB Database Server.

3.1.4 主从数据库

在chinaskill-sql-1和chinaskill-sql-2云服务器中配置MongoDB主从数据库。

  1. #修改配置文件
  2. [root@node1 ~]# vi /etc/mongod.conf
  3. bindIp: 0.0.0.0
  4. replication:
  5. replSetName: test
  6. #重启服务
  7. [root@node1 ~]# systemctl restart mongod.service
  8. #查看集群状态
  9. [root@node1 ~]# mongo
  10. > rs.status()
  11. {
  12. "ok" : 0,
  13. "errmsg" : "no replset config has been received",
  14. "code" : 94,
  15. "codeName" : "NotYetInitialized"
  16. }
  17. >
  18. 两个节点都输出和上面一样的结果,说明复制集正常启动了
  19. #定义一个有关于主从同步的变量
  20. > var config={_id:"test",members:
  21. ... [{_id:0,host:"172.16.2.197:27017"},
  22. ... {_id:1,host:"172.16.2.145:27017"}]}
  23. > rs.initiate(config)
  24. #初始化MongoDB的主从配置
  25. > rs.initiate(config)
  26. { "ok" : 1 }
  27. #查看状态
  28. > rs.status()
  29. {
  30. "set" : "test",
  31. "date" : ISODate("2023-04-27T02:38:55.312Z"),
  32. "myState" : 2,
  33. "term" : NumberLong(1),
  34. "syncingTo" : "172.16.2.197:27017",
  35. "syncSourceHost" : "172.16.2.197:27017",
  36. "syncSourceId" : 0,
  37. "heartbeatIntervalMillis" : NumberLong(2000),
  38. "optimes" : {
  39. "lastCommittedOpTime" : {
  40. "ts" : Timestamp(1682563127, 1),
  41. "t" : NumberLong(1)
  42. },
  43. "readConcernMajorityOpTime" : {
  44. "ts" : Timestamp(1682563127, 1),
  45. "t" : NumberLong(1)
  46. },
  47. "appliedOpTime" : {
  48. "ts" : Timestamp(1682563127, 1),
  49. "t" : NumberLong(1)
  50. },
  51. "durableOpTime" : {
  52. "ts" : Timestamp(1682563127, 1),
  53. "t" : NumberLong(1)
  54. }
  55. },
  56. "lastStableCheckpointTimestamp" : Timestamp(1682563087, 6),
  57. "electionParticipantMetrics" : {
  58. "votedForCandidate" : true,
  59. "electionTerm" : NumberLong(1),
  60. "lastVoteDate" : ISODate("2023-04-27T02:38:07.656Z"),
  61. "electionCandidateMemberId" : 0,
  62. "voteReason" : "",
  63. "lastAppliedOpTimeAtElection" : {
  64. "ts" : Timestamp(1682563077, 1),
  65. "t" : NumberLong(-1)
  66. },
  67. "maxAppliedOpTimeInSet" : {
  68. "ts" : Timestamp(1682563077, 1),
  69. "t" : NumberLong(-1)
  70. },
  71. "priorityAtElection" : 1,
  72. "newTermStartDate" : ISODate("2023-04-27T02:38:07.657Z"),
  73. "newTermAppliedDate" : ISODate("2023-04-27T02:38:08.145Z")
  74. },
  75. "members" : [
  76. {
  77. "_id" : 0,
  78. "name" : "172.16.2.197:27017",
  79. "health" : 1,
  80. "state" : 1,
  81. "stateStr" : "PRIMARY",
  82. "uptime" : 56,
  83. "optime" : {
  84. "ts" : Timestamp(1682563127, 1),
  85. "t" : NumberLong(1)
  86. },
  87. "optimeDurable" : {
  88. "ts" : Timestamp(1682563127, 1),
  89. "t" : NumberLong(1)
  90. },
  91. "optimeDate" : ISODate("2023-04-27T02:38:47Z"),
  92. "optimeDurableDate" : ISODate("2023-04-27T02:38:47Z"),
  93. "lastHeartbeat" : ISODate("2023-04-27T02:38:54.642Z"),
  94. "lastHeartbeatRecv" : ISODate("2023-04-27T02:38:53.657Z"),
  95. "pingMs" : NumberLong(0),
  96. "lastHeartbeatMessage" : "",
  97. "syncingTo" : "",
  98. "syncSourceHost" : "",
  99. "syncSourceId" : -1,
  100. "infoMessage" : "",
  101. "electionTime" : Timestamp(1682563087, 1),
  102. "electionDate" : ISODate("2023-04-27T02:38:07Z"),
  103. "configVersion" : 1
  104. },
  105. {
  106. "_id" : 1,
  107. "name" : "172.16.2.145:27017",
  108. "health" : 1,
  109. "state" : 2,
  110. "stateStr" : "SECONDARY",
  111. "uptime" : 323,
  112. "optime" : {
  113. "ts" : Timestamp(1682563127, 1),
  114. "t" : NumberLong(1)
  115. },
  116. "optimeDate" : ISODate("2023-04-27T02:38:47Z"),
  117. "syncingTo" : "172.16.2.197:27017",
  118. "syncSourceHost" : "172.16.2.197:27017",
  119. "syncSourceId" : 0,
  120. "infoMessage" : "",
  121. "configVersion" : 1,
  122. "self" : true,
  123. "lastHeartbeatMessage" : ""
  124. }
  125. ],
  126. "ok" : 1,
  127. "operationTime" : Timestamp(1682563127, 1),
  128. "$clusterTime" : {
  129. "clusterTime" : Timestamp(1682563127, 1),
  130. "signature" : {
  131. "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
  132. "keyId" : NumberLong(0)
  133. }
  134. }
  135. }
  136. test:SECONDARY>
  137. #从节点查看
  138. [root@node2]# mongo
  139. > rs.status()
  140. {
  141. "set" : "test",
  142. "date" : ISODate("2022-09-25T07:55:18.477Z"),
  143. "myState" : 2,
  144. "term" : NumberLong(1),
  145. "syncingTo" : "192.168.200.11:27017",
  146. "syncSourceHost" : "192.168.200.11:27017",
  147. "syncSourceId" : 0,
  148. "heartbeatIntervalMillis" : NumberLong(2000),
  149. "optimes" : {
  150. "lastCommittedOpTime" : {
  151. "ts" : Timestamp(1664092517, 1),
  152. "t" : NumberLong(1)
  153. },
  154. "readConcernMajorityOpTime" : {
  155. "ts" : Timestamp(1664092517, 1),
  156. "t" : NumberLong(1)
  157. },
  158. "appliedOpTime" : {
  159. "ts" : Timestamp(1664092517, 1),
  160. "t" : NumberLong(1)
  161. },
  162. "durableOpTime" : {
  163. "ts" : Timestamp(1664092517, 1),
  164. "t" : NumberLong(1)
  165. }
  166. },
  167. "lastStableCheckpointTimestamp" : Timestamp(1664092457, 7),
  168. "electionParticipantMetrics" : {
  169. "votedForCandidate" : true,
  170. "electionTerm" : NumberLong(1),
  171. "lastVoteDate" : ISODate("2022-09-25T07:54:17.523Z"),
  172. "electionCandidateMemberId" : 0,
  173. "voteReason" : "",
  174. "lastAppliedOpTimeAtElection" : {
  175. "ts" : Timestamp(1664092447, 1),
  176. "t" : NumberLong(-1)
  177. },
  178. "maxAppliedOpTimeInSet" : {
  179. "ts" : Timestamp(1664092447, 1),
  180. "t" : NumberLong(-1)
  181. },
  182. "priorityAtElection" : 1,
  183. "newTermStartDate" : ISODate("2022-09-25T07:54:17.525Z"),
  184. "newTermAppliedDate" : ISODate("2022-09-25T07:54:18.396Z")
  185. },
  186. "members" : [
  187. {
  188. "_id" : 0,
  189. "name" : "192.168.200.11:27017",
  190. "health" : 1,
  191. "state" : 1,
  192. "stateStr" : "PRIMARY",
  193. "uptime" : 69,
  194. "optime" : {
  195. "ts" : Timestamp(1664092507, 1),
  196. "t" : NumberLong(1)
  197. },
  198. "optimeDurable" : {
  199. "ts" : Timestamp(1664092507, 1),
  200. "t" : NumberLong(1)
  201. },
  202. "optimeDate" : ISODate("2022-09-25T07:55:07Z"),
  203. "optimeDurableDate" : ISODate("2022-09-25T07:55:07Z"),
  204. "lastHeartbeat" : ISODate("2022-09-25T07:55:16.508Z"),
  205. "lastHeartbeatRecv" : ISODate("2022-09-25T07:55:17.618Z"),
  206. "pingMs" : NumberLong(0),
  207. "lastHeartbeatMessage" : "",
  208. "syncingTo" : "",
  209. "syncSourceHost" : "",
  210. "syncSourceId" : -1,
  211. "infoMessage" : "",
  212. "electionTime" : Timestamp(1664092457, 1),
  213. "electionDate" : ISODate("2022-09-25T07:54:17Z"),
  214. "configVersion" : 1
  215. },
  216. {
  217. "_id" : 1,
  218. "name" : "192.168.200.12:27017",
  219. "health" : 1,
  220. "state" : 2,
  221. "stateStr" : "SECONDARY",
  222. "uptime" : 669,
  223. "optime" : {
  224. "ts" : Timestamp(1664092517, 1),
  225. "t" : NumberLong(1)
  226. },
  227. "optimeDate" : ISODate("2022-09-25T07:55:17Z"),
  228. "syncingTo" : "192.168.200.11:27017",
  229. "syncSourceHost" : "192.168.200.11:27017",
  230. "syncSourceId" : 0,
  231. "infoMessage" : "",
  232. "configVersion" : 1,
  233. "self" : true,
  234. "lastHeartbeatMessage" : ""
  235. }
  236. ],
  237. "ok" : 1,
  238. "operationTime" : Timestamp(1664092517, 1),
  239. "$clusterTime" : {
  240. "clusterTime" : Timestamp(1664092517, 1),
  241. "signature" : {
  242. "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
  243. "keyId" : NumberLong(0)
  244. }
  245. }
  246. }
  247. test:SECONDARY>
  248. #验证主从
  249. ##在主库上,我们来创建数据
  250. test:PRIMARY> use test
  251. switched to db test
  252. test:PRIMARY> db.createCollection("test")
  253. {
  254. "ok" : 1,
  255. "operationTime" : Timestamp(1664092674, 1),
  256. "$clusterTime" : {
  257. "clusterTime" : Timestamp(1664092674, 1),
  258. "signature" : {
  259. "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
  260. "keyId" : NumberLong(0)
  261. }
  262. }
  263. }
  264. test:PRIMARY> show databases
  265. admin 0.000GB
  266. config 0.000GB
  267. local 0.000GB
  268. test 0.000GB
  269. #接下来,我们来到从库上,来查看我们在主库上创建的结果是否出现在了从库上。
  270. test:SECONDARY> show dbs
  271. 2022-09-25T15:58:54.336+0800 E QUERY [js] Error: listDatabases failed:{
  272. "operationTime" : Timestamp(1664092727, 1),
  273. "ok" : 0,
  274. "errmsg" : "not master and slaveOk=false",
  275. "code" : 13435,
  276. "codeName" : "NotMasterNoSlaveOk",
  277. "$clusterTime" : {
  278. "clusterTime" : Timestamp(1664092727, 1),
  279. "signature" : {
  280. "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
  281. "keyId" : NumberLong(0)
  282. }
  283. }
  284. } :
  285. _getErrorWithCode@src/mongo/shell/utils.js:25:13
  286. Mongo.prototype.getDBs@src/mongo/shell/mongo.js:151:1
  287. shellHelper.show@src/mongo/shell/utils.js:882:13
  288. shellHelper@src/mongo/shell/utils.js:766:15
  289. @(shellhelp2):1:1
  290. #从上图可以看出,我们在从库上查询的操作失败,这是因为在配置了MongoDB的主从同步后,在MongoDB的从库上,没有读和写的权限,因此我们无法查看数据。
  291. #设置从节点可读
  292. test:SECONDARY> db.getMongo().setSlaveOk()
  293. #同步成功
  294. test:SECONDARY> show dbs
  295. admin 0.000GB
  296. config 0.000GB
  297. local 0.000GB
  298. test 0.000GB

3.1.5 node环境管理

使用提供的压缩文件,安装Node.js环境。

  1. [root@chinaskill-node-1-0002 ~]# wget https://nodejs.org/dist/v13.11.0/node-v13.11.0-linux-x64.tar.xz
  2. --2023-05-04 08:41:12-- https://nodejs.org/dist/v13.11.0/node-v13.11.0-linux-x64.tar.xz
  3. Resolving nodejs.org (nodejs.org)... 104.20.22.46, 104.20.23.46, 2606:4700:10::6814:172e, ...
  4. Connecting to nodejs.org (nodejs.org)|104.20.22.46|:443... connected.
  5. HTTP request sent, awaiting response... 200 OK
  6. Length: 21225324 (20M) [application/x-xz]
  7. Saving to: ‘node-v13.11.0-linux-x64.tar.xz’
  8. 100%[==================================================================>] 21,225,324 1.36MB/s in 16s
  9. 2023-05-04 08:41:29 (1.30 MB/s) - ‘node-v13.11.0-linux-x64.tar.xz’ saved [21225324/21225324]
  10. [root@chinaskill-node-1-0002 ~]# ll
  11. total 20728
  12. -rw-r--r-- 1 root root 21225324 Mar 12 2020 node-v13.11.0-linux-x64.tar.xz
  13. [root@chinaskill-node-1-0002 ~]# tar -xvf node-v13.11.0-linux-x64.tar.xz
  14. [root@chinaskill-node-1-0002 ~]# ll
  15. total 20732
  16. drwxr-xr-x 6 1001 1001 4096 Mar 12 2020 node-v13.11.0-linux-x64
  17. -rw-r--r-- 1 root root 21225324 Mar 12 2020 node-v13.11.0-linux-x64.tar.xz
  18. [root@chinaskill-node-1-0002 ~]# cd node-v13.11.0-linux-x64
  19. [root@chinaskill-node-1-0002 node-v13.11.0-linux-x64]# ll
  20. total 176
  21. drwxr-xr-x 2 1001 1001 4096 Mar 12 2020 bin
  22. -rw-r--r-- 1 1001 1001 54110 Mar 12 2020 CHANGELOG.md
  23. drwxr-xr-x 3 1001 1001 4096 Mar 12 2020 include
  24. drwxr-xr-x 3 1001 1001 4096 Mar 12 2020 lib
  25. -rw-r--r-- 1 1001 1001 77130 Mar 12 2020 LICENSE
  26. -rw-r--r-- 1 1001 1001 26524 Mar 12 2020 README.md
  27. drwxr-xr-x 5 1001 1001 4096 Mar 12 2020 share
  28. [root@chinaskill-node-1-0002 node-v13.11.0-linux-x64]# cd bin/
  29. [root@chinaskill-node-1-0002 bin]# ./node -v
  30. v13.11.0
  31. [root@chinaskill-node-1-0002 ~]# ln -s /root/node-v13.11.0-linux-x64/bin/node /usr/local/bin/node
  32. [root@chinaskill-node-1-0002 ~]# ln -s /root/node-v13.11.0-linux-x64/bin/npm /usr/local/bin/npm
  33. [root@chinaskill-node-1-0002 ~]# node -v
  34. v13.11.0

3.1.6 安全组管理

3.1.7 RocketChat上云

使用http服务器提供文件,将Rocket.Chat应用部署上云。

  1. ##上传rocketchat-cloud.tar.gz,进行解压
  2. [root@chinaskill-node-1-0002 ~]# tar -xvf rocketchat-cloud.tar.gz
  3. [root@chinaskill-node-1-0002 ~]# cd rocketchat
  4. [root@chinaskill-node-1-0002 rocketchat]# ll
  5. total 304780
  6. drwxr-xr-x 2 root root 4096 Sep 28 2022 mongodb
  7. drwxr-xr-x 2 root root 4096 Sep 28 2022 node
  8. -rw-r--r-- 1 root root 167869971 Sep 28 2022 rocket.chat.tgz
  9. -rw-r--r-- 1 root root 144210495 Sep 28 2022 yum.tar.gz
  10. [root@chinaskill-node-1-0002 rocketchat]# cd node/
  11. [root@chinaskill-node-1-0002 node]# ll
  12. total 22592
  13. -rw-r--r-- 1 root root 23131523 Apr 5 2022 node-v12.22.12-linux-x64.tar.gz
  14. [root@chinaskill-node-1-0002 node]#
  15. [root@chinaskill-node-1-0002 node]# tar -zxvf node-v12.22.12-linux-x64.tar.gz -C /root/node
  16. ##修改环境变量
  17. [root@chinaskill-node-1-0002 node]# tail -1 /etc/profile
  18. export PATH=/root/node/node-v12.22.12-linux-x64/bin:$PATH
  19. [root@chinaskill-node-1-0002 node]# node -v
  20. v12.22.12
  21. [root@chinaskill-node-1-0002 node]#
  22. #安装依赖
  23. [root@chinaskill-node-1-0002 ~]# yum install -y gcc-c++ make
  24. [root@chinaskill-node-1-0002 ~]# yum install -y epel-release GraphicsMagick
  25. ##解压rocket.chat.tgz软件包
  26. [root@chinaskill-node-1-0002 rocketchat]# npm config set registry https://registry.npmmirror.com/
  27. [root@chinaskill-node-1-0002 rocketchat]# npm config set ELECTRON_MIRROR https://cdn.npmmirror.com/dist/electron/
  28. #打包
  29. cd /tmp/bundle/programs/server/
  30. npm install
  31. #移到/opt并命名Rocket.Chat
  32. mv /tmp/bundle /opt/Rocket.Chat
  33. #添加用户
  34. useradd -M rocketchat && usermod -L rocketchat
  35. #赋予权限
  36. chown -R rocketchat:rocketchat /opt/Rocket.Chat
  37. #
  38. [root@rocket Rocket.Chat]# vi /lib/systemd/system/rocketchat.service
  39. [Unit]
  40. Description=The Rocket.Chat server
  41. After=network.target remote-fs.target nss-lookup.target nginx.service mongod.service
  42. [Service]
  43. ExecStart=/usr/local/node/bin/node /opt/Rocket.Chat/main.js
  44. StandardOutput=syslog
  45. StandardError=syslog
  46. SyslogIdentifier=rocketchat
  47. User=rocketchat
  48. Environment=MONGO_URL=mongodb://192.168.1.182:27017/rocketchat?replicaSet=rs01 MONGO_OPLOG_URL=mongodb://192.168.1.182:27017/local?replicaSet=rs01 ROOT_URL=http://localhost:3000/ PORT=3000
  49. [Install]
  50. WantedBy=multi-user.target
  51. #启动
  52. [root@rocket Rocket.Chat]# systemctl start rocketchat
  53. [root@rocket Rocket.Chat]# systemctl status rocketchat
  54. ● rocketchat.service - The Rocket.Chat server
  55. Loaded: loaded (/usr/lib/systemd/system/rocketchat.service; disabled; vendor preset: disabled)
  56. Active: active (running) since Mon 2022-10-03 14:43:40 CST; 1min 54s ago
  57. Main PID: 8973 (node)
  58. CGroup: /system.slice/rocketchat.service
  59. └─8973 /usr/local/node/bin/node /opt/Rocket.Chat/main.js
  60. Oct 03 14:44:00 rocket rocketchat[8973]: | MongoDB Engine: wiredTiger |
  61. Oct 03 14:44:00 rocket rocketchat[8973]: | Platform: linux |
  62. Oct 03 14:44:00 rocket rocketchat[8973]: | Process Port: 3000 |
  63. Oct 03 14:44:00 rocket rocketchat[8973]: | Site URL: http://localhost:3000/ |
  64. Oct 03 14:44:00 rocket rocketchat[8973]: | ReplicaSet OpLog: Enabled |
  65. Oct 03 14:44:00 rocket rocketchat[8973]: | Commit Hash: 9b685693fb |
  66. Oct 03 14:44:00 rocket rocketchat[8973]: | Commit Branch: HEAD |
  67. Oct 03 14:44:00 rocket rocketchat[8973]: | |
  68. Oct 03 14:44:00 rocket rocketchat[8973]: +-----------------------------------------------+
  69. Oct 03 14:44:26 rocket rocketchat[8973]: (node:8973) [DEP0005] DeprecationWarning: Buffer() is deprecated due to sec...stead.
  70. Hint: Some lines were ellipsized, use -l to show in full.

3.1.8 NAT网关

根据要求创建一个公网NAT网关。

3.1.9云服务器备份

创建一个云服务器备份存储库名为server_backup,容量为100G。将ChinaSkill-node-1云服务器制作镜像文件chinaskill-image。

3.1.10 负载均衡器

根据要求创建一个负载均衡器chinaskill-elb。

3.1.11 弹性伸缩管理

根据要求新建一个弹性伸缩启动配置。

任务2 公有云服务运维(10分)

3.2.1 云容器引擎

在公有云上,按照要求创建一个x86架构的容器云集群。

3.2.2 云容器管理

使用插件管理在kcloud容器集群中安装Dashboard可视化监控界面。

3.2.3 使用kubectl操作集群

在kcloud集群中安装kubectl命令,使用kubectl命令管理kcloud集群。

  1. #1 安装kubectl
  2. 其实node上是已经安装了kubectl的,但是会报这个错:
  3. [root@kcloud-server ~]# kubectl get node
  4. The connection to the server localhost:8080 was refused - did you specify the right host or port?
  5. #这个其实是因为没有配置文件和环境变量的原因,为了记录整个过程,我把node节点上原有的kubectl卸载了。
  6. 然后重新安装:
  7. 先配置yum源(建议使用阿里的源,华为的不好用,期待完善)
  8. cat <<EOF > /etc/yum.repos.d/kubernetes.repo
  9. [kubernetes]
  10. name=Kubernetes
  11. baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
  12. enabled=1
  13. gpgcheck=1
  14. repo_gpgcheck=1
  15. gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
  16. EOF
  17. 然后开始安装,注意,版本一定要和集群的版本对应
  18. [root@kcloud-server ~]# yum install -y kubectl-1.25.1
  19. 2 下载配置文件
  20. 这个在官网的页面按照操作进行就行(链接:https://console.huaweicloud.com/cce2.0/?agencyId=07fb10cb020026b81f90c00e0ba587f6&region=cn-east-3&locale=zh-cn#/app/resource/cluster/detail/accessAPI?clusterName=test&clusterId=7631fcee-894f-11ec-b434-0255ac1002c7&type=api)
  21. 点这里:
  22. 3 安装和配置kubectl
  23. mkdir -p $HOME/.kube
  24. mv -f kubeconfig.json $HOME/.kube/config
  25. 4 切换kubectl的访问模式
  26. [root@kcloud-server home]# kubectl get node
  27. NAME STATUS ROLES AGE VERSION
  28. 172.16.1.25 Ready <none> 14m v1.25.3-r0-25.1.23
  29. [root@kcloud-server home]#

[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-E0IY6a7l-1686127383485)(/k8s1.png)]

[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-ZJQFQg5W-1686127383486)(/k8s2.png)]

3.2.4 安装Helm

使用提供的Helm软件包,在kcloud集群中安装Helm服务。

  1. [root@kcloud-21836 ~]# tar -zxvf helm-v3.10.1-linux-amd64.tar.gz
  2. linux-amd64/
  3. linux-amd64/helm
  4. linux-amd64/LICENSE
  5. linux-amd64/README.md
  6. [root@kcloud-21836 ~]# helm version
  7. version.BuildInfo{Version:"v3.10.1", GitCommit:"9f88ccb6aee40b9a0535fcc7efea6055e1ef72c9", GitTreeState:"clean", GoVersion:"go1.18.7"}

3.2.5 根据提供的chart包mariadb-7.3.14.tgz部署mariadb服务,修改mariadb使用NodePort模式对其进行访问。

  1. [root@kcloud-21836 ~]# helm install mariadb mariadb/
  2. NAME: mariadb
  3. LAST DEPLOYED: Wed May 31 14:53:45 2023
  4. NAMESPACE: default
  5. STATUS: deployed
  6. REVISION: 1
  7. TEST SUITE: None
  8. [root@kcloud-21836 ~]# kubectl get pods
  9. NAME READY STATUS RESTARTS AGE
  10. mariadb-848b94c775-d8np4 1/1 Running 0 29s
  11. mysql-5c6c7986f5-4kr5b 1/1 Running 0 16m
  12. nginx-d5f959df4-4lm2s 1/1 Running 0 6m26s
  13. wordpress-7f9587fb4f-rrncz 1/1 Running 0 16m
  14. [root@kcloud-21836 ~]# kubectl get svc |grep mariadb
  15. mariadb NodePort 10.247.206.107 <none> 3306:32334/TCP 11m

[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-EXoqGGhZ-1686127383486)(/mariadb1.png)]

3.2.6 在k8s集群中创建mariadb命名空间,根据提供的chart包mariadb-7.3.14.tgz修改其配置,使用NodePort模式对其进行访问。

  1. [root@kcloud-21836 ~]# cat mariadbnamespace.yaml
  2. apiVersion: v1
  3. kind: Namespace
  4. metadata:
  5. name: mariadb
  6. [root@kcloud-21836 ~]# kubectl create -f mariadbnamespace.yaml
  7. namespace/mariadb created
  8. [root@kcloud-21836 ~]# helm install mariadb mariadb/ -n mariadb
  9. NAME: mariadb
  10. LAST DEPLOYED: Wed May 31 15:09:27 2023
  11. NAMESPACE: mariadb
  12. STATUS: deployed
  13. REVISION: 1
  14. TEST SUITE: None
  15. [root@kcloud-21836 ~]# kubectl get svc
  16. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  17. kubernetes ClusterIP 10.247.0.1 <none> 443/TCP 46m
  18. mariadb NodePort 10.247.206.107 <none> 3306:32334/TCP 15m
  19. mysql ClusterIP 10.247.202.181 <none> 3306/TCP 31m
  20. nginx NodePort 10.247.77.152 <none> 80:30880/TCP 21m
  21. wordpress NodePort 10.247.202.57 <none> 80:30417/TCP 31m
  22. [root@kcloud-21836 ~]# kubectl get namespace
  23. NAME STATUS AGE
  24. default Active 46m
  25. kube-node-lease Active 46m
  26. kube-public Active 46m
  27. kube-system Active 46m
  28. mariadb Active 6m4s
  29. [root@kcloud-21836 ~]# kubectl get svc -n mariadb
  30. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  31. mariadb NodePort 10.247.14.17 <none> 3306:32304/TCP 37s

3.2.7 云硬盘存储卷

按照要求购买云硬盘存储卷。

[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-IYSsUO5j-1686127383487)(/云硬盘1.png)]

[外链图片转存失败,源站可能有防盗链机制,建议将图片保存下来直接上传(img-qknboSop-1686127383487)(/云硬盘2.png)]

3.2.8 多容器Pod管理

在kcloud集群节点/root目录下编写YAML文件mu-pod.yaml,要求一个pod中包含两个容器。

  1. [root@kcloud-server ~]# vi mu-pod.yaml
  2. apiVersion: v1
  3. kind: Pod
  4. metadata:
  5. name: mu-pod
  6. namespace: default
  7. spec:
  8. containers:
  9. - name: containers01
  10. image: nginx
  11. ports:
  12. - name: http
  13. containerPort: 80
  14. - name: containers02
  15. image: tomcat
  16. ports:
  17. - name: tomcat
  18. containerPort: 80

3.2.9 Namespace管理

在kcloud集群节点/root目录下编写YAML文件my-namespace.yaml并创建namespace。

  1. [root@kcloud-server ~]# vi my-namespace.yaml
  2. apiVersion: v1
  3. kind: Namespace
  4. metadata:
  5. name: test

3.2.10 Secrets管理–Opaque

在master节点/root目录下编写YAML文件secret.yaml,要求执行文件创建密钥。

  1. [root@kcloud-server ~]# vi secret.yaml
  2. apiVersion: v1
  3. kind: Secret
  4. metadata:
  5. name: mysecret
  6. namespace: default
  7. data:
  8. username: YWRtaW4=
  9. password: MWYyZDFlMmU2N2Rm
  10. type: Opaque

3.2.11 私有仓库管理

在master节点添加搭建的本地私有chart仓库源,并上传wordpress-13.0.23.tgz包至chartmuseum私有仓库中。可以使用本地仓库chart源部署应用。

  1. #为/data/charts授予777权限
  2. chmod 777 /data/charts/
  3. #查看svc
  4. [root@kcloud-server ~]# kubectl get svc -n chartmuseum
  5. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  6. chartmuseum ClusterIP 10.247.199.133 <none> 8080/TCP 24m
  7. #添加本地仓库源,name 为 chartmuseum
  8. [root@kcloud-server ~]# helm repo add chartmuseum http://10.247.199.133:8080
  9. "chartmuseum" has been added to your repositories
  10. [root@kcloud-server ~]# helm repo list
  11. NAME URL
  12. chartmuseum http://10.247.199.133:8080
  13. #上传wordpress-13.0.23.tgz 包至 chartmuseum 私有仓库中
  14. [root@kcloud-server ~]# curl --data-binary "@wordpress-13.0.23.tgz" http://10.247.199.133:8080/api/charts
  15. {"saved":true}[root@kcloud-server ~]#
  16. #更新仓库
  17. [root@kcloud-server ~]# helm repo update
  18. Hang tight while we grab the latest from your chart repositories...
  19. ...Successfully got an update from the "chartmuseum" chart repository
  20. Update Complete. ⎈ Happy Helming!
  21. #列出
  22. [root@kcloud-server ~]# helm search repo wordpress
  23. NAME CHART VERSION APP VERSION DESCRIPTION
  24. chartmuseum/wordpress 13.0.23 5.9.2 WordPress is the world's most popular blogging ...
  25. #/data/charts/目录查看
  26. [root@kcloud-server charts]# ls
  27. index-cache.yaml wordpress-13.0.23.tgz

3.2.12 公有云安全:入侵检测系统

使用提供的makechk.tar.gz包安装chkrootkit入侵检测工具,安装完毕后使用chkrootkit工具扫描系统。

  1. #购买centos7.9云主机
  2. #上传makechk.tar.gz,chkrootkit.tar.gz软件包
  3. #解压makechk.tar.gz软件
  4. #配置yum源
  5. [root@ecs-cecc ~]# cat /etc/yum.repos.d/local.repo
  6. [local]
  7. name=local
  8. baseurl=file:///root/makechk
  9. gpgcheck=0
  10. enabled=1
  11. [root@ecs-cecc ~]# yum makecache
  12. #安装编译安装依赖包
  13. [root@ecs-cecc packages]# cd /root/ && yum install -y gcc gcc-c++ make glibc*
  14. #解压chkrootkit.tar.gz
  15. #查看目录文件
  16. [root@ecs-cecc ~]# cd chkrootkit-0.55/
  17. [root@ecs-cecc chkrootkit-0.55]# ls
  18. ACKNOWLEDGMENTS chkdirs.c chkproc.c chkrootkit.lsm chkwtmp.c ifpromisc.c patch README.chklastlog strings.c
  19. check_wtmpx.c chklastlog.c chkrootkit chkutmp.c COPYRIGHT Makefile README README.chkwtmp
  20. #编译安装
  21. [root@ecs-cecc chkrootkit-0.55]# make sense
  22. cc -DHAVE_LASTLOG_H -o chklastlog chklastlog.c
  23. cc -DHAVE_LASTLOG_H -o chkwtmp chkwtmp.c
  24. cc -DHAVE_LASTLOG_H -D_FILE_OFFSET_BITS=64 -o ifpromisc ifpromisc.c
  25. cc -o chkproc chkproc.c
  26. cc -o chkdirs chkdirs.c
  27. cc -o check_wtmpx check_wtmpx.c
  28. cc -static -o strings-static strings.c
  29. cc -o chkutmp chkutmp.c
  30. #添加环境变量
  31. [root@ecs-cecc ~]# cp -r chkrootkit-0.55/ /usr/local/chkrootkit
  32. [root@ecs-cecc ~]# cd /usr/local/chkrootkit/
  33. [root@ecs-cecc chkrootkit]# ls
  34. ACKNOWLEDGMENTS chkdirs chklastlog.c chkrootkit chkutmp.c COPYRIGHT Makefile README.chklastlog strings-static
  35. check_wtmpx chkdirs.c chkproc chkrootkit.lsm chkwtmp ifpromisc patch README.chkwtmp
  36. check_wtmpx.c chklastlog chkproc.c chkutmp chkwtmp.c ifpromisc.c README strings.c
  37. [root@ecs-cecc chkrootkit]# cp chkrootkit /usr/bin/
  38. #查看版本
  39. [root@ecs-cecc chkrootkit]# chkrootkit -V
  40. chkrootkit version 0.55
  41. #创建/var/log/chkrootkit/chkrootkit.log文件
  42. [root@ecs-cecc ~]# mkdir /var/log/chkrootkit/
  43. [root@ecs-cecc ~]# touch /var/log/chkrootkit/chkrootkit.log
  44. #扫描系统保存至/var/log/chkrootkit/chkrootkit.log
  45. [root@ecs-cecc ~]# chkrootkit > /var/log/chkrootkit/chkrootkit.log
  46. #查看扫描结果
  47. [root@ecs-cecc ~]# cat /var/log/chkrootkit/chkrootkit.log
  48. ROOTDIR is `/'
  49. Checking `amd'... not found
  50. Checking `basename'... not infected
  51. Checking `biff'... not found
  52. Checking `chfn'... not infected
  53. Checking `chsh'... not infected
  54. Checking `cron'... not infected
  55. Checking `crontab'... not infected
  56. Checking `date'... not infected
  57. Checking `du'... not infected
  58. Checking `dirname'... not infected
  59. Checking `echo'... not infected
  60. Checking `egrep'... not infected
  61. Checking `env'... not infected
  62. Checking `find'... not infected
  63. Checking `fingerd'... not found
  64. Checking `gpm'... not found
  65. Checking `grep'... not infected
  66. Checking `hdparm'... not found
  67. Checking `su'... not infected
  68. Checking `ifconfig'... not infected
  69. Checking `inetd'... not tested
  70. Checking `inetdconf'... not found
  71. Checking `identd'... not found
  72. Checking `init'... not infected
  73. Checking `killall'... not infected
  74. Checking `ldsopreload'... can't exec ./strings-static, not tested
  75. Checking `login'... not infected
  76. Checking `ls'... not infected
  77. Checking `lsof'... not infected
  78. Checking `mail'... not infected
  79. Checking `mingetty'... not found
  80. Checking `netstat'... not infected
  81. Checking `named'... not found
  82. Checking `passwd'... not infected
  83. Checking `pidof'... not infected
  84. Checking `pop2'... not found
  85. Checking `pop3'... not found
  86. Checking `ps'... not infected
  87. Checking `pstree'... not infected
  88. Checking `rpcinfo'... not found
  89. Checking `rlogind'... not found
  90. Checking `rshd'... not found
  91. Checking `slogin'... not infected
  92. Checking `sendmail'... not infected
  93. Checking `sshd'... not found
  94. Checking `syslogd'... not tested
  95. Checking `tar'... not infected
  96. Checking `tcpd'... not found
  97. Checking `tcpdump'... not infected
  98. Checking `top'... not infected
  99. Checking `telnetd'... not found
  100. Checking `timed'... not found
  101. Checking `traceroute'... not found
  102. Checking `vdir'... not infected
  103. Checking `w'... not infected
  104. Checking `write'... not infected
  105. Checking `aliens'... no suspect files
  106. Searching for sniffer's logs, it may take a while... nothing found
  107. Searching for HiDrootkit's default dir... nothing found
  108. Searching for t0rn's default files and dirs... nothing found
  109. Searching for t0rn's v8 defaults... nothing found
  110. Searching for Lion Worm default files and dirs... nothing found
  111. Searching for RSHA's default files and dir... nothing found
  112. Searching for RH-Sharpe's default files... nothing found
  113. Searching for Ambient's rootkit (ark) default files and dirs... nothing found
  114. Searching for suspicious files and dirs, it may take a while...
  115. /usr/lib/debug/usr/.dwz
  116. Searching for LPD Worm files and dirs... nothing found
  117. Searching for Ramen Worm files and dirs... nothing found
  118. Searching for Maniac files and dirs... nothing found
  119. Searching for RK17 files and dirs... nothing found
  120. Searching for Ducoci rootkit... nothing found
  121. Searching for Adore Worm... nothing found
  122. Searching for ShitC Worm... nothing found
  123. Searching for Omega Worm... nothing found
  124. Searching for Sadmind/IIS Worm... nothing found
  125. Searching for MonKit... nothing found
  126. Searching for Showtee... nothing found
  127. Searching for OpticKit... nothing found
  128. Searching for T.R.K... nothing found
  129. Searching for Mithra... nothing found
  130. Searching for LOC rootkit... nothing found
  131. Searching for Romanian rootkit... nothing found
  132. Searching for HKRK rootkit... nothing found
  133. Searching for Suckit rootkit... nothing found
  134. Searching for Volc rootkit... nothing found
  135. Searching for Gold2 rootkit... nothing found
  136. Searching for TC2 Worm default files and dirs... nothing found
  137. Searching for Anonoying rootkit default files and dirs... nothing found
  138. Searching for ZK rootkit default files and dirs... nothing found
  139. Searching for ShKit rootkit default files and dirs... nothing found
  140. Searching for AjaKit rootkit default files and dirs... nothing found
  141. Searching for zaRwT rootkit default files and dirs... nothing found
  142. Searching for Madalin rootkit default files... nothing found
  143. Searching for Fu rootkit default files... nothing found
  144. Searching for ESRK rootkit default files... nothing found
  145. Searching for rootedoor... nothing found
  146. Searching for ENYELKM rootkit default files... nothing found
  147. Searching for common ssh-scanners default files... nothing found
  148. Searching for Linux/Ebury - Operation Windigo ssh... not tested
  149. Searching for 64-bit Linux Rootkit ... nothing found
  150. Searching for 64-bit Linux Rootkit modules... nothing found
  151. Searching for Mumblehard Linux ... nothing found
  152. Searching for Backdoor.Linux.Mokes.a ... nothing found
  153. Searching for Malicious TinyDNS ... nothing found
  154. Searching for Linux.Xor.DDoS ... nothing found
  155. Searching for Linux.Proxy.1.0 ... nothing found
  156. Searching for CrossRAT ... nothing found
  157. Searching for Hidden Cobra ... nothing found
  158. Searching for Rocke Miner ... nothing found
  159. Searching for PWNLNX4 lkm... nothing found
  160. Searching for PWNLNX6 lkm... nothing found
  161. Searching for Umbreon lrk... nothing found
  162. Searching for Kinsing.a backdoor... nothing found
  163. Searching for RotaJakiro backdoor... nothing found
  164. Searching for suspect PHP files... nothing found
  165. Searching for anomalies in shell history files... Warning: `//root/.bash_history
  166. //root/.history' file size is zero
  167. Checking `asp'... not infected
  168. Checking `bindshell'... not infected
  169. Checking `lkm'... not tested: can't exec
  170. Checking `rexedcs'... not found
  171. Checking `sniffer'... not tested: can't exec ./ifpromisc
  172. Checking `w55808'... not infected
  173. Checking `wted'... not tested: can't exec ./chkwtmp
  174. Checking `scalper'... not infected
  175. Checking `slapper'... not infected
  176. Checking `z2'... not tested: can't exec ./chklastlog
  177. Checking `chkutmp'... not tested: can't exec ./chkutmp
  178. Checking `OSX_RSPLUG'... not tested

3.2.13 公有云安全:日志分析服务

然后使用提供的sepb_elk_latest.tar镜像安装ELK服务。

  1. #上传docker-repo.tar.gz,sepb_elk_latest.tar
  2. #解压docker-repo.tar.gz
  3. #配置yum源安装docker
  4. [root@ecs-cecc ~]# cat /etc/yum.repos.d/local.repo
  5. [local]
  6. name=local
  7. baseurl=file:///opt/docker-repo
  8. gpgcheck=0
  9. enabled=1
  10. [root@ecs-cecc ~]# yum clean all
  11. [root@ecs-cecc ~]# yum makecache
  12. #安装docker
  13. [root@ecs-cecc ~]# yum install -y docker-ce
  14. #启动docker,设置为开机自启
  15. [root@ecs-cecc ~]# systemctl start docker && systemctl enable docker
  16. Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
  17. #查看状态
  18. [root@ecs-cecc ~]# systemctl status docker
  19. #导入镜像
  20. [root@ecs-cecc ~]# docker load -i sepb_elk_latest.tar
  21. #启动elk容器(由于Elasticsearch启动需要最大虚拟内存区域数量,修改sysctl.conf文件追加vm.max_map_count=262144)
  22. [root@ecs-cecc ~]# docker run -p 5601:5601 -p 9200:9200 -p 5044:5044 -e ES_MIN_MEM=128m -e ES_MAX_MEM=1024m -it --name elk sebp/elk:latest
  23. [root@ecs-cecc ~]# docker ps
  24. CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
  25. 1bf5111a8a0c sebp/elk:latest "/usr/local/bin/star…" About a minute ago Up About a minute 0.0.0.0:5044->5044/tcp, 0.0.0.0:5601->5601/tcp, 9300/tcp, 0.0.0.0:9200->9200/tcp, 9600/tcp elk
  26. [root@ecs-cecc ~]#
  1. #上传filebeat-7.13.2-x86_64.rpm
  2. #安装filebeat
  3. [root@ecs-cecc ~]# yum install -y filebeat-7.13.2-x86_64.rpm
  4. #启动
  5. [root@ecs-cecc ~]# systemctl start filebeat
  6. #查看状态
  7. [root@ecs-cecc ~]# systemctl status filebeat
  8. #应用filebeat
  9. 方式一:(收集yum数据到本地文件)
  10. [root@ecs-cecc ~]# vi /etc/filebeat/filebeat.yml
  11. filebeat.inputs:
  12. - type: log
  13. enabled: True
  14. paths:
  15. - /var/log/yum.log
  16. output.file:
  17. path: "/tmp"
  18. filename: "filebeat-test.txt"
  19. #重启filebeat服务
  20. [root@ecs-cecc ~]# systemctl restart filebeat
  21. #安装httpd服务
  22. [root@ecs-cecc ~]# yum install -y httpd
  23. #验证
  24. [root@ecs-cecc tmp]# cat /tmp/filebeat-test.txt
  25. {"@timestamp":"2022-10-16T09:20:03.410Z","@metadata":{"beat":"filebeat","type":"_doc","version":"7.13.2"},"log":{"offset":2213,"file":{"path":"/var/log/yum.log"}},"message":"Oct 16 17:20:02 Installed: httpd-2.4.6-97.el7.centos.5.x86_64","input":{"type":"log"},"host":{"hostname":"ecs-cecc","architecture":"x86_64","name":"ecs-cecc","os":{"family":"redhat","name":"CentOS Linux","kernel":"3.10.0-1160.53.1.el7.x86_64","codename":"Core","type":"linux","platform":"centos","version":"7 (Core)"},"id":"acca19161ce94d449c58923b12797030","containerized":false,"ip":["192.168.1.151","fe80::f816:3eff:fe79:d168","172.17.0.1","fe80::42:40ff:fef4:5e7","fe80::14fb:49ff:feec:ffad"],"mac":["fa:16:3e:79:d1:68","02:42:40:f4:05:e7","16:fb:49:ec:ff:ad"]},"agent":{"version":"7.13.2","hostname":"ecs-cecc","ephemeral_id":"a522699e-3e6b-44a7-b833-d14b43d2edba","id":"67d653cb-908e-418f-9356-5b7f2461dbe8","name":"ecs-cecc","type":"filebeat"},"ecs":{"version":"1.8.0"},"cloud":{"machine":{"type":"c6s.xlarge.2"},"service":{"name":"Nova"},"provider":"openstack","instance":{"name":"ecs-cecc.novalocal","id":"i-0129dc00"},"availability_zone":"cn-east-2c"}}
  26. 方式二:(收集yum数据到Elasticsearch中)
  27. #修改配置文件
  28. [root@ecs-cecc ~]# cat /etc/filebeat/filebeat.yml
  29. filebeat.inputs:
  30. - type: log
  31. enabled: True
  32. paths:
  33. - /var/log/yum.log
  34. output.elasticsearch:
  35. hosts: ["localhost:9200"]
  36. #重启
  37. [root@ecs-cecc ~]# systemctl restart filebeat

3.2.14 WordPress应用部署

根据提供的chart包wordpress-13.0.23.tgz部署WordPress服务。

  1. [root@kcloud-21836 ~]# ll
  2. total 17768
  3. -rw-r----- 1 root root 1532 May 31 14:25 check_env.sh
  4. -rw-r----- 1 root root 1935 May 31 14:25 disk_filter.sh
  5. -rw------- 1 root root 14565908 May 31 14:30 helm-v3.10.1-linux-amd64.tar.gz
  6. drwxr-xr-x 2 3434 3434 4096 May 31 14:33 linux-amd64
  7. drwx------ 4 root root 4096 May 31 14:36 mariadb
  8. drwx------ 4 root root 4096 May 31 14:34 nginx
  9. -rw-r----- 1 root root 722 May 31 14:25 print_log.sh
  10. -rw-r----- 1 root root 3595678 Nov 29 2022 socat.tgz
  11. drwx------ 4 root root 4096 May 31 14:34 wordpress
  12. [root@kcloud-21836 ~]# helm install wordpress wordpress/
  13. NAME: wordpress
  14. LAST DEPLOYED: Wed May 31 14:37:44 2023
  15. NAMESPACE: default
  16. STATUS: deployed
  17. REVISION: 1
  18. TEST SUITE: None
  19. [root@kcloud-21836 ~]# kubectl get pod
  20. NAME READY STATUS RESTARTS AGE
  21. mysql-5c6c7986f5-4kr5b 1/1 Running 0 84s
  22. wordpress-7f9587fb4f-rrncz 1/1 Running 0 84s
  23. [root@kcloud-21836 ~]# kubectl get svc
  24. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  25. kubernetes ClusterIP 10.247.0.1 <none> 443/TCP 46m
  26. mariadb NodePort 10.247.206.107 <none> 3306:32334/TCP 15m
  27. mysql ClusterIP 10.247.202.181 <none> 3306/TCP 31m
  28. nginx NodePort 10.247.77.152 <none> 80:30880/TCP 21m
  29. wordpress NodePort 10.247.202.57 <none> 80:30417/TCP 31m

3.2.15 ChartMuseum仓库部署

在k8s集群中创建chartmuseum命名空间,编写yaml文件在chartmuseum命名空间中使用chartmuseum:latest镜像创建本地私有chart仓库。

  1. apiVersion: v1
  2. kind: Namespace
  3. metadata:
  4. name: chartmuseum
  5. ---
  6. apiVersion: apps/v1
  7. kind: Deployment
  8. metadata:
  9. labels:
  10. app: chartmuseum
  11. name: chartmuseum
  12. namespace: chartmuseum
  13. spec:
  14. replicas: 1
  15. selector:
  16. matchLabels:
  17. app: chartmuseum
  18. strategy:
  19. rollingUpdate:
  20. maxSurge: 1
  21. maxUnavailable: 1
  22. type: RollingUpdate
  23. template:
  24. metadata:
  25. labels:
  26. app: chartmuseum
  27. spec:
  28. containers:
  29. - image: chartmuseum/chartmuseum:latest
  30. imagePullPolicy: IfNotPresent
  31. name: chartmuseum
  32. ports:
  33. - containerPort: 8080
  34. protocol: TCP
  35. env:
  36. - name: DEBUG
  37. value: "1"
  38. - name: STORAGE
  39. value: local
  40. - name: STORAGE_LOCAL_ROOTDIR
  41. value: /charts
  42. resources:
  43. limits:
  44. cpu: 500m
  45. memory: 256Mi
  46. requests:
  47. cpu: 100m
  48. memory: 64Mi
  49. volumeMounts:
  50. - mountPath: /charts
  51. name: charts-volume
  52. volumes:
  53. - name: charts-volume
  54. nfs:
  55. path: /data/charts
  56. server: 192.168.200.10
  57. restartPolicy: Always
  58. ---
  59. apiVersion: v1
  60. kind: Service
  61. metadata:
  62. name: chartmuseum
  63. namespace: chartmuseum
  64. spec:
  65. ports:
  66. - port: 8080
  67. protocol: TCP
  68. targetPort: 8080
  69. selector:
  70. app: chartmuseum

3.2.16 生命周期管理-配置Pod生命周期

登录kcloud集群节点,在default命名空间下创建一个名Pod,并进行Pod生命周期管理。

  1. ##nginx为例
  2. apiVersion: v1
  3. kind: Pod
  4. metadata:
  5. name: pod-hook-exec
  6. namespace: default
  7. spec:
  8. containers:
  9. - name: main-container
  10. image: nginx:1.17.1
  11. ports:
  12. - name: nginx-port
  13. containerPort: 80
  14. lifecycle:
  15. postStart:
  16. exec: #在容器启动的时候执行一个命令,修改掉nginx的默认首页内容
  17. command: ["/bin/sh","-c","echo postStart... > /usr/share/nginx/html/index.html"]
  18. preStop: #在容器停止之前停止nginx服务
  19. exec:
  20. command: ["/usr/sbin/nginx","-s","quit"]

3.2.17 定时任务管理—创建定时任务

在kcloud集群节点/root目录下编写yaml文件date.yaml完成定时管理任务。

  1. apiVersion: batch/v1beta1
  2. kind: CronJob
  3. metadata:
  4. name: hello
  5. spec:
  6. schedule: "*/1 * * * *"
  7. jobTemplate:
  8. spec:
  9. template:
  10. spec:
  11. containers:
  12. - name: hello
  13. image: busybox
  14. args:
  15. - /bin/sh
  16. - -c
  17. - date; echo Hello from the Kubernetes cluster
  18. restartPolicy: OnFailure

3.2.18 HPA管理—创建HPA规则

在kcloud集群节点/root目录下编写YAML文件hpa.yaml,完成HPA管理。

  1. [root@k8s-master-node1 ~]# cat web.yaml
  2. apiVersion: apps/v1
  3. kind: Deployment
  4. metadata:
  5. name: web
  6. namespace: default
  7. spec:
  8. replicas: 1
  9. selector:
  10. matchLabels:
  11. app: web
  12. template:
  13. metadata:
  14. labels:
  15. app: web
  16. spec:
  17. containers:
  18. - name: nginx
  19. image: nginx:latest
  20. imagePullPolicy: IfNotPresent
  21. ports:
  22. - containerPort: 80
  23. [root@k8s-master-node1 ~]# cat deployment.yaml
  24. apiVersion: apps/v1
  25. kind: Deployment
  26. metadata:
  27. name: web
  28. spec:
  29. replicas: 1
  30. selector:
  31. matchLabels:
  32. app: web
  33. template:
  34. metadata:
  35. labels:
  36. app: web
  37. spec:
  38. containers:
  39. - name: nginx
  40. image: nginx:latest
  41. imagePullPolicy: IfNotPresent
  42. resources:
  43. requests:
  44. cpu: 500m # 指定所需的 CPU 资源 request
  45. [root@k8s-master-node1 ~]# cat hpa.yaml
  46. apiVersion: autoscaling/v2beta2
  47. kind: HorizontalPodAutoscaler
  48. metadata:
  49. name: web
  50. namespace: default
  51. spec:
  52. scaleTargetRef:
  53. apiVersion: apps/v1
  54. kind: Deployment
  55. name: web
  56. minReplicas: 1
  57. maxReplicas: 1000 #指定伸缩范围为 1–1000
  58. metrics:
  59. - type: Resource
  60. resource:
  61. name: cpu
  62. target:
  63. type: Utilization
  64. averageUtilization: 80
  65. behavior:
  66. scaleUp:
  67. stabilizationWindowSeconds: 5
  68. policies:
  69. - type: Pods
  70. value: 9 #9倍数量的副本数
  71. periodSeconds: 1
  72. scaleDown:
  73. stabilizationWindowSeconds: 5 #时间窗口为 5s
  74. policies:
  75. - type: Pods
  76. value: 1
  77. periodSeconds: 1
  78. [root@k8s-master-node1 ~]# kubectl apply -f web.yaml
  79. [root@k8s-master-node1 ~]# kubectl apply -f deployment.yaml
  80. [root@k8s-master-node1 ~]# kubectl apply -f hpa.yaml

3.2.19 使用Helm模板

使用赛项提供的mychart-0.1.0.tgz模板包,在kcloud集群中安装名为nginx的模板实例。

  1. [root@kcloud-21836 ~]# helm install nginx nginx/
  2. NAME: nginx
  3. LAST DEPLOYED: Wed May 31 14:47:47 2023
  4. NAMESPACE: default
  5. STATUS: deployed
  6. REVISION: 1
  7. TEST SUITE: None
  8. [root@kcloud-21836 ~]# kubectl get pod
  9. NAME READY STATUS RESTARTS AGE
  10. mysql-5c6c7986f5-4kr5b 1/1 Running 0 10m
  11. nginx-d5f959df4-4lm2s 1/1 Running 0 9s
  12. wordpress-7f9587fb4f-rrncz 1/1 Running 0 10m
  13. [root@kcloud-21836 ~]# kubectl get svc
  14. NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
  15. kubernetes ClusterIP 10.247.0.1 <none> 443/TCP 46m
  16. mariadb NodePort 10.247.206.107 <none> 3306:32334/TCP 15m
  17. mysql ClusterIP 10.247.202.181 <none> 3306/TCP 31m
  18. nginx NodePort 10.247.77.152 <none> 80:30880/TCP 21m
  19. wordpress NodePort 10.247.202.57 <none> 80:30417/TCP 31m
声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/菜鸟追梦旅行/article/detail/77668
推荐阅读
相关标签
  

闽ICP备14008679号