当前位置:   article > 正文

编译安装kubernetes 1.15.3

afpcjhn

环境:

操作系统: win10 on Ubuntu 18.04.3 LTS (GNU/Linux 4.4.0-18362-Microsoft x86_64)
GO 版本 go version go1.12.9 linux/amd64
GCC 版本 gcc version 7.4.0 (Ubuntu 7.4.0-1ubuntu1~18.04.1)
make 版本 Make 4.1
部署服务器: etcd 192.168.30.31 kubernetes 192.168.30.32
部署环境:centos 7.6
业务部署目录:/apps/业务
K8S 网段 10.66.0.0/16
docker pod 网段 10.67.0.0/16
使用kube-router 提供网络服务

go 环境部署

  1. cd /usr/local/src
  2. wget https://dl.google.com/go/go1.12.9.linux-amd64.tar.gz
  3. tar -xvf go1.12.9.linux-amd64.tar.gz
  4. mv go ../
  5. vi /etc/profile
  6. export GOPATH=/mnt/e/work/go
  7. export GOBIN=/mnt/e/work/go/bin
  8. PATH=$PATH:/usr/local/go/bin:$HOME/bin:$GOBIN
  9. export PATH
  10. source /etc/profile
  11. go version
  12. root@Qist:~# go version
  13. go version go1.12.9 linux/amd64
  14. 创建go 工作目录BIN目录
  15. mkdir -p /mnt/e/work/go/{bin,src,pkg}

安装编译kubernetes 1.15.3 依赖

apt -y install make gcc

编译kubernetes 1.15.3

  1. wget https://github.com/kubernetes/kubernetes/archive/v1.15.3.tar.gz
  2. tar -xvf v1.15.3.tar.gz
  3. cd kubernetes-1.15.3/
  4. make
  5. cd ./_output/local/bin/linux/amd64
  6. mkdir -p /mnt/e/work/k8s/bin
  7. cp -pdr kube* /mnt/e/work/k8s/bin/

编译证书生成工具

  1. go get github.com/cloudflare/cfssl/cmd/cfssl
  2. go get github.com/cloudflare/cfssl/cmd/cfssljson

部署etcd

  1. #etcd 使用二进制方式部署编译依赖会用到墙外的东西环境设置比较麻烦 etcd 节点服务器操作
  2. wget https://github.com/etcd-io/etcd/releases/download/v3.4.0/etcd-v3.4.0-linux-amd64.tar.gz
  3. mkdir -p /apps/etcd/{bin,conf,ssl,data}
  4. # 数据存储目录
  5. mkdir -p /apps/etcd/data/default.etcd
  6. # 创建 etcd 用户
  7. useradd etcd -s /sbin/nologin -M
  8. # 解压etcd
  9. tar -xvf etcd-v3.4.0-linux-amd64.tar.gz
  10. # cp 可执行文件到工作目录
  11. cd etcd-v3.4.0-linux-amd64/
  12. cp -pdr etcd etcdctl /apps/etcd/bin/
  13. # etcd 证书 在win on Ubuntu 操作
  14. mkdir -p /apps/work/k8s/cfssl/ && \
  15. cat << EOF | tee /apps/work/k8s/cfssl/ca-config.json
  16. {
  17. "signing": {
  18. "default": {
  19. "expiry": "87600h"
  20. },
  21. "profiles": {
  22. "kubernetes": {
  23. "usages": [
  24. "signing",
  25. "key encipherment",
  26. "server auth",
  27. "client auth"
  28. ],
  29. "expiry": "87600h"
  30. }
  31. }
  32. }
  33. }
  34. EOF
  35. # 创建etcd ca证书配置
  36. mkdir -p /apps/work/k8s/cfssl/etcd
  37. cat << EOF | tee /apps/work/k8s/cfssl/etcd/etcd-ca-csr.json
  38. {
  39. "CN": "etcd",
  40. "key": {
  41. "algo": "rsa",
  42. "size": 2048
  43. },
  44. "names": [
  45. {
  46. "C": "CN",
  47. "ST": "GuangDong",
  48. "L": "GuangZhou",
  49. "O": "cluster",
  50. "OU": "cluster"
  51. }
  52. ]
  53. }
  54. EOF
  55. # 生成 ETCD CA 证书和私钥
  56. mkdir -p /apps/work/k8s/cfssl/pki/etcd
  57. cfssl gencert -initca /apps/work/k8s/cfssl/etcd/etcd-ca-csr.json | cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd-ca
  58. # 创建 ETCD Server 证书
  59. export ETCD_SERVER_IPS=" \
  60. \"192.168.30.31\" \
  61. " && \
  62. export ETCD_SERVER_HOSTNAMES=" \
  63. \"etcd\" \
  64. " && \
  65. cat << EOF | tee /apps/work/k8s/cfssl/etcd/etcd_server.json
  66. {
  67. "CN": "etcd",
  68. "hosts": [
  69. "127.0.0.1",
  70. ${ETCD_SERVER_IPS},
  71. ${ETCD_SERVER_HOSTNAMES}
  72. ],
  73. "key": {
  74. "algo": "rsa",
  75. "size": 2048
  76. },
  77. "names": [
  78. {
  79. "C": "CN",
  80. "ST": "GuangDong",
  81. "L": "GuangZhou",
  82. "O": "cluster",
  83. "OU": "cluster"
  84. }
  85. ]
  86. }
  87. EOF
  88. # 生成 ETCD Server 证书和私钥
  89. cfssl gencert \
  90. -ca=/apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem \
  91. -ca-key=/apps/work/k8s/cfssl/pki/etcd/etcd-ca-key.pem \
  92. -config=/apps/work/k8s/cfssl/ca-config.json \
  93. -profile=kubernetes \
  94. /apps/work/k8s/cfssl/etcd/etcd_server.json | \
  95. cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd_server
  96. # 创建 ETCD Member 证书
  97. export ETCD_MEMBER_1_IP=" \
  98. \"192.168.30.31\" \
  99. " && \
  100. export ETCD_MEMBER_1_HOSTNAMES="etcd\
  101. " && \
  102. cat << EOF | tee /apps/work/k8s/cfssl/etcd/${ETCD_MEMBER_1_HOSTNAMES}.json
  103. {
  104. "CN": "etcd",
  105. "hosts": [
  106. "127.0.0.1",
  107. ${ETCD_MEMBER_1_IP},
  108. "${ETCD_MEMBER_1_HOSTNAMES}"
  109. ],
  110. "key": {
  111. "algo": "rsa",
  112. "size": 2048
  113. },
  114. "names": [
  115. {
  116. "C": "CN",
  117. "ST": "GuangDong",
  118. "L": "GuangZhou",
  119. "O": "cluster",
  120. "OU": "cluster"
  121. }
  122. ]
  123. }
  124. EOF
  125. ##### 生成 ETCD Member 1 证书和私钥
  126. cfssl gencert \
  127. -ca=/apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem \
  128. -ca-key=/apps/work/k8s/cfssl/pki/etcd/etcd-ca-key.pem \
  129. -config=/apps/work/k8s/cfssl/ca-config.json \
  130. -profile=kubernetes \
  131. /apps/work/k8s/cfssl/etcd/${ETCD_MEMBER_1_HOSTNAMES}.json | \
  132. cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd_member_${ETCD_MEMBER_1_HOSTNAMES}
  133. # 创建 ETCD Client 配置文件
  134. cat << EOF | tee /apps/work/k8s/cfssl/etcd/etcd_client.json
  135. {
  136. "CN": "client",
  137. "hosts": [""],
  138. "key": {
  139. "algo": "rsa",
  140. "size": 2048
  141. },
  142. "names": [
  143. {
  144. "C": "CN",
  145. "ST": "GuangDong",
  146. "L": "GuangZhou",
  147. "O": "cluster",
  148. "OU": "cluster"
  149. }
  150. ]
  151. }
  152. EOF
  153. #生成 ETCD Client 证书和私钥
  154. cfssl gencert \
  155. -ca=/apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem \
  156. -ca-key=/apps/work/k8s/cfssl/pki/etcd/etcd-ca-key.pem \
  157. -config=/apps/work/k8s/cfssl/ca-config.json \
  158. -profile=kubernetes \
  159. /apps/work/k8s/cfssl/etcd/etcd_client.json | \
  160. cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd_client
  161. # 复制证书到etcd 节点服务器
  162. scp -r /apps/work/k8s/cfssl/pki/etcd/* 192.168.30.31:/apps/etcd/ssl
  163. # 配置etcd 启动文件 etcd 服务器操作
  164. vi /apps/etcd/conf/etcd
  165. ETCD_OPTS="--name=etcd \
  166. --data-dir=/apps/etcd/data/default.etcd \
  167. --listen-peer-urls=https://192.168.30.31:2380 \
  168. --listen-client-urls=https://192.168.30.31:2379,https://127.0.0.1:2379 \
  169. --advertise-client-urls=https://192.168.30.31:2379 \
  170. --initial-advertise-peer-urls=https://192.168.30.31:2380 \
  171. --initial-cluster=etcd=https://192.168.30.31:2380 \
  172. --initial-cluster-token=etcd-cluster \
  173. --initial-cluster-state=new \
  174. --heartbeat-interval=6000 \
  175. --election-timeout=30000 \
  176. --snapshot-count=5000 \
  177. --auto-compaction-retention=1 \
  178. --max-request-bytes=33554432 \
  179. --quota-backend-bytes=17179869184 \
  180. --trusted-ca-file=/apps/etcd/ssl/etcd-ca.pem \
  181. --cert-file=/apps/etcd/ssl/etcd_server.pem \
  182. --key-file=/apps/etcd/ssl/etcd_server-key.pem \
  183. --peer-cert-file=/apps/etcd/ssl/etcd_member_etcd.pem \
  184. --peer-key-file=/apps/etcd/ssl/etcd_member_etcd-key.pem \
  185. --peer-client-cert-auth \
  186. --peer-trusted-ca-file=/apps/etcd/ssl/etcd-ca.pem"
  187. # 配置etcd 启动service 文件
  188. vi /usr/lib/systemd/system/etcd.service
  189. [Unit]
  190. Description=Etcd Server
  191. After=network.target
  192. After=network-online.target
  193. Wants=network-online.target
  194. [Service]
  195. Type=notify
  196. LimitNOFILE=1024000
  197. LimitNPROC=1024000
  198. LimitCORE=infinity
  199. LimitMEMLOCK=infinity
  200. User=etcd
  201. Group=etcd
  202. EnvironmentFile=-/apps/etcd/conf/etcd
  203. ExecStart=/apps/etcd/bin/etcd $ETCD_OPTS
  204. Restart=on-failure
  205. [Install]
  206. WantedBy=multi-user.target
  207. # etcd 目录 etcd 用户权限
  208. chown -R etcd.etcd /apps/etcd
  209. # 启动 etcd
  210. systemctl start etcd
  211. # 设置开机启动
  212. systemctl enable etcd
  213. # 设置环境变量
  214. vi /etc/profile
  215. export ETCDCTL_API=3
  216. export ENDPOINTS=https://192.168.30.31:2379
  217. #执行
  218. source /etc/profile
  219. vim ~/.bashrc
  220. alias etcdctl='/apps/etcd/bin/etcdctl --endpoints=${ENDPOINTS} --cacert=/apps/etcd/ssl/etcd-ca.pem'
  221. source ~/.bashrc
  222. # 查看状态
  223. etcdctl endpoint health
  224. [root@etcd ~]# etcdctl endpoint health
  225. https://192.168.30.31:2379 is healthy: successfully committed proposal: took = 16.707114ms

kube-apiserver 部署

  1. # win on Ubuntu 操作
  2. #创建kube-apiserver ca证书配置
  3. mkdir -p /apps/work/k8s/cfssl/k8s
  4. cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s-ca-csr.json
  5. {
  6. "CN": "kubernetes",
  7. "key": {
  8. "algo": "rsa",
  9. "size": 2048
  10. },
  11. "names": [
  12. {
  13. "C": "CN",
  14. "ST": "GuangDong",
  15. "L": "GuangZhou",
  16. "O": "cluster",
  17. "OU": "cluster"
  18. }
  19. ]
  20. }
  21. EOF
  22. #生成 Kubernetes CA 证书和私钥
  23. mkdir -p /apps/work/k8s/cfssl/pki/k8s
  24. cfssl gencert -initca /apps/work/k8s/cfssl/k8s/k8s-ca-csr.json | \
  25. cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s-ca
  26. #创建 Kubernetes API Server 证书配置文件
  27. export K8S_APISERVER_VIP=" \
  28. \"192.168.30.32\" \
  29. " && \
  30. export K8S_APISERVER_SERVICE_CLUSTER_IP="10.66.0.1" && \
  31. export K8S_APISERVER_HOSTNAME="api.k8s.cluster.local" && \
  32. export K8S_CLUSTER_DOMAIN_SHORTNAME="cluster" && \
  33. export K8S_CLUSTER_DOMAIN_FULLNAME="cluster.local" && \
  34. cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_apiserver.json
  35. {
  36. "CN": "kubernetes",
  37. "hosts": [
  38. "127.0.0.1",
  39. ${K8S_APISERVER_VIP},
  40. "${K8S_APISERVER_SERVICE_CLUSTER_IP}",
  41. "${K8S_APISERVER_HOSTNAME}",
  42. "kubernetes",
  43. "kubernetes.default",
  44. "kubernetes.default.svc",
  45. "kubernetes.default.svc.${K8S_CLUSTER_DOMAIN_SHORTNAME}",
  46. "kubernetes.default.svc.${K8S_CLUSTER_DOMAIN_FULLNAME}"
  47. ],
  48. "key": {
  49. "algo": "rsa",
  50. "size": 2048
  51. },
  52. "names": [
  53. {
  54. "C": "CN",
  55. "ST": "GuangDong",
  56. "L": "GuangZhou",
  57. "O": "cluster",
  58. "OU": "cluster"
  59. }
  60. ]
  61. }
  62. EOF
  63. #生成 Kubernetes API Server 证书和私钥
  64. cfssl gencert \
  65. -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  66. -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
  67. -config=/apps/work/k8s/cfssl/ca-config.json \
  68. -profile=kubernetes \
  69. /apps/work/k8s/cfssl/k8s/k8s_apiserver.json | \
  70. cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_server
  71. # 创建 Kubernetes webhook 证书配置文件
  72. cat << EOF | tee /apps/work/k8s/cfssl/k8s/aggregator.json
  73. {
  74. "CN": "aggregator",
  75. "hosts": [""],
  76. "key": {
  77. "algo": "rsa",
  78. "size": 2048
  79. },
  80. "names": [
  81. {
  82. "C": "CN",
  83. "ST": "GuangDong",
  84. "L": "GuangZhou",
  85. "O": "cluster",
  86. "OU": "cluster"
  87. }
  88. ]
  89. }
  90. EOF
  91. ##### 生成 Kubernetes webhook 证书和私钥
  92. cfssl gencert \
  93. -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  94. -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
  95. -config=/apps/work/k8s/cfssl/ca-config.json \
  96. -profile=kubernetes \
  97. /apps/work/k8s/cfssl/k8s/aggregator.json | \
  98. cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/aggregator
  99. # 远程服务器创建目录
  100. mkdir -p /apps/kubernetes/{bin,conf,config,kubelet-plugins,log,ssl}
  101. #证书到分发到 192.168.30.32
  102. scp -r /apps/work/k8s/cfssl/pki/k8s 192.168.30.32:/apps/kubernetes/ssl/k8s
  103. # cp etcd_client 证书
  104. scp -r /apps/work/k8s/cfssl/pki/etcd/etcd_client* 192.168.30.32:/apps/kubernetes/ssl/etcd
  105. scp -r /apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem 192.168.30.32:/apps/kubernetes/ssl/etcd
  106. # 分发kubernetes 二进制文件到远程服务器把所有的二进制都cp 过去
  107. scp -r /mnt/e/work/k8s/bin/* 192.168.30.32:/apps/kubernetes/bin
  108. # 远程服务器操作192.168.30.32
  109. # 创建 k8s 用户
  110. useradd k8s -s /sbin/nologin -M
  111. # encryption-config.yaml 生成
  112. cd /apps/kubernetes/config
  113. export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
  114. cat > encryption-config.yaml <<EOF
  115. kind: EncryptionConfig
  116. apiVersion: v1
  117. resources:
  118. - resources:
  119. - secrets
  120. providers:
  121. - aescbc:
  122. keys:
  123. - name: key1
  124. secret: ${ENCRYPTION_KEY}
  125. - identity: {}
  126. EOF
  127. # Apiserver配置文件生成
  128. cd /apps/kubernetes/conf
  129. vi kube-apiserver
  130. KUBE_APISERVER_OPTS="--logtostderr=false \
  131. --bind-address=192.168.30.32 \
  132. --advertise-address=192.168.30.32 \
  133. --secure-port=5443 \
  134. --insecure-port=0 \
  135. --service-cluster-ip-range=10.66.0.0/16 \
  136. --service-node-port-range=30000-65000 \
  137. --etcd-cafile=/apps/kubernetes/ssl/etcd/etcd-ca.pem \
  138. --etcd-certfile=/apps/kubernetes/ssl/etcd/etcd_client.pem \
  139. --etcd-keyfile=/apps/kubernetes/ssl/etcd/etcd_client-key.pem \
  140. --etcd-prefix=/registry \
  141. --etcd-servers=https://192.168.30.31:2379 \
  142. --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
  143. --tls-cert-file=/apps/kubernetes/ssl/k8s/k8s_server.pem \
  144. --tls-private-key-file=/apps/kubernetes/ssl/k8s/k8s_server-key.pem \
  145. --kubelet-client-certificate=/apps/kubernetes/ssl/k8s/k8s_server.pem \
  146. --kubelet-client-key=/apps/kubernetes/ssl/k8s/k8s_server-key.pem \
  147. --service-account-key-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
  148. --requestheader-client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
  149. --proxy-client-cert-file=/apps/kubernetes/ssl/k8s/aggregator.pem \
  150. --proxy-client-key-file=/apps/kubernetes/ssl/k8s/aggregator-key.pem \
  151. --requestheader-allowed-names=aggregator \
  152. --requestheader-group-headers=X-Remote-Group \
  153. --requestheader-extra-headers-prefix=X-Remote-Extra- \
  154. --requestheader-username-headers=X-Remote-User \
  155. --enable-aggregator-routing=true \
  156. --anonymous-auth=false \
  157. --allow-privileged=true \
  158. --experimental-encryption-provider-config=/apps/kubernetes/config/encryption-config.yaml \
  159. --enable-admission-plugins=AlwaysPullImages,DefaultStorageClass,DefaultTolerationSeconds,LimitRanger,NamespaceExists,NamespaceLifecycle,NodeRestriction,OwnerReferencesPermissionEnforcement,PodNodeSelector,PersistentVolumeClaimResize,PodPreset,PodTolerationRestriction,ResourceQuota,ServiceAccount,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \
  160. --disable-admission-plugins=DenyEscalatingExec,ExtendedResourceToleration,ImagePolicyWebhook,LimitPodHardAntiAffinityTopology,NamespaceAutoProvision,Priority,EventRateLimit,PodSecurityPolicy \
  161. --cors-allowed-origins=.* \
  162. --enable-swagger-ui \
  163. --runtime-config=api/all=true \
  164. --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
  165. --authorization-mode=Node,RBAC \
  166. --apiserver-count=1 \
  167. --audit-log-maxage=30 \
  168. --audit-log-maxbackup=3 \
  169. --audit-log-maxsize=100 \
  170. --kubelet-https \
  171. --event-ttl=1h \
  172. --feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true \
  173. --enable-bootstrap-token-auth=true \
  174. --audit-log-path=/apps/kubernetes/log/api-server-audit.log \
  175. --alsologtostderr=true \
  176. --log-dir=/apps/kubernetes/log \
  177. --v=2 \
  178. --endpoint-reconciler-type=lease \
  179. --max-mutating-requests-inflight=100 \
  180. --max-requests-inflight=500 \
  181. --target-ram-mb=6000"
  182. # kube-apiserver systemd文件
  183. vi /usr/lib/systemd/system/kube-apiserver.service
  184. [Unit]
  185. Description=Kubernetes API Server
  186. Documentation=https://github.com/kubernetes/kubernetes
  187. [Service]
  188. Type=notify
  189. LimitNOFILE=1024000
  190. LimitNPROC=1024000
  191. LimitCORE=infinity
  192. LimitMEMLOCK=infinity
  193. EnvironmentFile=-/apps/kubernetes/conf/kube-apiserver
  194. ExecStart=/apps/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
  195. Restart=on-failure
  196. RestartSec=5
  197. User=k8s
  198. [Install]
  199. WantedBy=multi-user.target
  200. # /apps/kubernetes 目录k8s 权限
  201. chown -R k8s.k8s /apps/kubernetes
  202. # 启动kube-apiserver
  203. systemctl start kube-apiserver
  204. # 设置开机启动
  205. systemctl enable kube-apiserver
  206. #创建admin管理员证书 win on Ubuntu 操作
  207. cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_apiserver_admin.json
  208. {
  209. "CN": "admin",
  210. "hosts": [""],
  211. "key": {
  212. "algo": "rsa",
  213. "size": 2048
  214. },
  215. "names": [
  216. {
  217. "C": "CN",
  218. "ST": "GuangDong",
  219. "L": "GuangZhou",
  220. "O": "system:masters",
  221. "OU": "Kubernetes-manual"
  222. }
  223. ]
  224. }
  225. EOF
  226. cfssl gencert -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  227. -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
  228. -config=/apps/work/k8s/cfssl/ca-config.json \
  229. -profile=kubernetes \
  230. /apps/work/k8s/cfssl/k8s/k8s_apiserver_admin.json | \
  231. cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_apiserver_admin
  232. # 创建admin管理员登录 admin.kubeconfig
  233. export KUBE_APISERVER="https://192.168.30.32:5443"
  234. kubectl config set-cluster kubernetes \
  235. --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  236. --embed-certs=true \
  237. --server=${KUBE_APISERVER} \
  238. --kubeconfig=admin.kubeconfig
  239. kubectl config set-credentials admin \
  240. --client-certificate=/apps/work/k8s/cfssl/pki/k8s/k8s_apiserver_admin.pem \
  241. --client-key=/apps/work/k8s/cfssl/pki/k8s/k8s_apiserver_admin-key.pem \
  242. --embed-certs=true \
  243. --kubeconfig=admin.kubeconfig
  244. kubectl config set-context kubernetes \
  245. --cluster=kubernetes \
  246. --user=admin \
  247. --namespace=kube-system \
  248. --kubeconfig=admin.kubeconfig
  249. kubectl config use-context kubernetes --kubeconfig=admin.kubeconfig
  250. # cp 证书到当前用户目录
  251. cp admin.kubeconfig ~/.kube/config
  252. # 验证kube-apiserver 是否正常
  253. kubectl cluster-info
  254. [root@]~]#kubectl cluster-info
  255. Kubernetes master is running at https://192.168.30.32:5443

kube_scheduler 部署

  1. # 生成kube_scheduler访问kube-apiserver 证书win on Ubuntu 操作
  2. cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_scheduler.json
  3. {
  4. "CN": "system:kube-scheduler",
  5. "hosts": [""],
  6. "key": {
  7. "algo": "rsa",
  8. "size": 2048
  9. },
  10. "names": [
  11. {
  12. "C": "CN",
  13. "ST": "GuangDong",
  14. "L": "GuangZhou",
  15. "O": "system:kube-scheduler",
  16. "OU": "Kubernetes-manual"
  17. }
  18. ]
  19. }
  20. EOF
  21. ## 生成 Kubernetes Scheduler 证书和私钥
  22. cfssl gencert \
  23. -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  24. -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
  25. -config=/apps/work/k8s/cfssl/ca-config.json \
  26. -profile=kubernetes \
  27. /apps/work/k8s/cfssl/k8s/k8s_scheduler.json | \
  28. cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_scheduler
  29. #创建kube_scheduler.kubeconfig
  30. kubectl config set-cluster kubernetes \
  31. --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  32. --embed-certs=true \
  33. --server=${KUBE_APISERVER} \
  34. --kubeconfig=kube_scheduler.kubeconfig
  35. kubectl config set-credentials system:kube-scheduler \
  36. --client-certificate=/apps/work/k8s/cfssl/pki/k8s/k8s_scheduler.pem \
  37. --embed-certs=true \
  38. --client-key=/apps/work/k8s/cfssl/pki/k8s/k8s_scheduler-key.pem \
  39. --kubeconfig=kube_scheduler.kubeconfig
  40. kubectl config set-context kubernetes \
  41. --cluster=kubernetes \
  42. --user=system:kube-scheduler \
  43. --kubeconfig=kube_scheduler.kubeconfig
  44. kubectl config use-context kubernetes --kubeconfig=kube_scheduler.kubeconfig
  45. # cp kube_scheduler.kubeconfig 到远程服务器
  46. scp kube_scheduler.kubeconfig 192.168.30.32:/apps/kubernetes/config
  47. # 远程服务器操作
  48. cd /apps/kubernetes/conf
  49. # 创建kube-scheduler 启动配置文件
  50. vi kube-scheduler
  51. KUBE_SCHEDULER_OPTS=" \
  52. --logtostderr=false \
  53. --address=0.0.0.0 \
  54. --leader-elect=true \
  55. --kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
  56. --authentication-kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
  57. --authorization-kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
  58. --alsologtostderr=true \
  59. --kube-api-qps=100 \
  60. --kube-api-burst=100 \
  61. --log-dir=/apps/kubernetes/log \
  62. --v=2"
  63. # 创建/kube-scheduler 启动文件
  64. vi /usr/lib/systemd/system/kube-scheduler.service
  65. [Unit]
  66. Description=Kubernetes Scheduler
  67. Documentation=https://github.com/kubernetes/kubernetes
  68. [Service]
  69. LimitNOFILE=1024000
  70. LimitNPROC=1024000
  71. LimitCORE=infinity
  72. LimitMEMLOCK=infinity
  73. EnvironmentFile=-/apps/kubernetes/conf/kube-scheduler
  74. ExecStart=/apps/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
  75. Restart=on-failure
  76. RestartSec=5
  77. User=k8s
  78. [Install]
  79. WantedBy=multi-user.target
  80. # 给新创建文件 k8s 用户权限
  81. chown -R k8s.k8s /apps/kubernetes
  82. # 启动 kube-scheduler
  83. systemctl start kube-scheduler
  84. # 设置开启启动
  85. systemctl enable kube-scheduler
  86. # 验证状态
  87. kubectl get cs
  88. [root@]~]# kubectl get cs
  89. NAME STATUS MESSAGE ERROR
  90. controller-manager Healthy ok
  91. scheduler Healthy ok
  92. etcd-0 Healthy {"health":"true"}

kube-controller-manager部署

  1. # 生成kube-controller-manager访问kube-apiserver 证书 win on Ubuntu 操作
  2. cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_controller_manager.json
  3. {
  4. "CN": "system:kube-controller-manager",
  5. "hosts": [""],
  6. "key": {
  7. "algo": "rsa",
  8. "size": 2048
  9. },
  10. "names": [
  11. {
  12. "C": "CN",
  13. "ST": "GuangDong",
  14. "L": "GuangZhou",
  15. "O": "system:kube-controller-manager",
  16. "OU": "Kubernetes-manual"
  17. }
  18. ]
  19. }
  20. EOF
  21. ## 生成 Kubernetes Controller Manager 证书和私钥
  22. cfssl gencert \
  23. -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  24. -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
  25. -config=/apps/work/k8s/cfssl/ca-config.json \
  26. -profile=kubernetes \
  27. /apps/work/k8s/cfssl/k8s/k8s_controller_manager.json | \
  28. cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_controller_manager
  29. # 创建kube_controller_manager.kubeconfig
  30. kubectl config set-cluster kubernetes \
  31. --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  32. --embed-certs=true \
  33. --server=${KUBE_APISERVER} \
  34. --kubeconfig=kube_controller_manager.kubeconfig
  35. kubectl config set-credentials system:kube-controller-manager \
  36. --client-certificate=/apps/work/k8s/cfssl/pki/k8s/k8s_controller_manager.pem \
  37. --embed-certs=true \
  38. --client-key=/apps/work/k8s/cfssl/pki/k8s/k8s_controller_manager-key.pem \
  39. --kubeconfig=kube_controller_manager.kubeconfig
  40. kubectl config set-context kubernetes \
  41. --cluster=kubernetes \
  42. --user=system:kube-controller-manager \
  43. --kubeconfig=kube_controller_manager.kubeconfig
  44. kubectl config use-context kubernetes --kubeconfig=kube_controller_manager.kubeconfig
  45. # cp kube_controller_manager.kubeconfig 到远程服务器
  46. scp kube_controller_manager.kubeconfig 192.168.30.32:/apps/kubernetes/config
  47. # 远程服务器操作
  48. cd /apps/kubernetes/conf
  49. vi kube-controller-manager
  50. KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
  51. --leader-elect=true \
  52. --address=0.0.0.0 \
  53. --service-cluster-ip-range=10.66.0.0/16 \
  54. --cluster-cidr=10.67.0.0/16 \
  55. --node-cidr-mask-size=24 \
  56. --cluster-name=kubernetes \
  57. --allocate-node-cidrs=true \
  58. --kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
  59. --authentication-kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
  60. --authorization-kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
  61. --use-service-account-credentials=true \
  62. --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
  63. --requestheader-client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
  64. --node-monitor-grace-period=40s \
  65. --node-monitor-period=5s \
  66. --pod-eviction-timeout=5m0s \
  67. --terminated-pod-gc-threshold=50 \
  68. --alsologtostderr=true \
  69. --cluster-signing-cert-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
  70. --cluster-signing-key-file=/apps/kubernetes/ssl/k8s/k8s-ca-key.pem \
  71. --deployment-controller-sync-period=10s \
  72. --experimental-cluster-signing-duration=86700h0m0s \
  73. --enable-garbage-collector=true \
  74. --root-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
  75. --service-account-private-key-file=/apps/kubernetes/ssl/k8s/k8s-ca-key.pem \
  76. --feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true \
  77. --controllers=*,bootstrapsigner,tokencleaner \
  78. --horizontal-pod-autoscaler-use-rest-clients=true \
  79. --horizontal-pod-autoscaler-sync-period=10s \
  80. --flex-volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume \
  81. --tls-cert-file=/apps/kubernetes/ssl/k8s/k8s_controller_manager.pem \
  82. --tls-private-key-file=/apps/kubernetes/ssl/k8s/k8s_controller_manager-key.pem \
  83. --kube-api-qps=100 \
  84. --kube-api-burst=100 \
  85. --log-dir=/apps/kubernetes/log \
  86. --v=2"
  87. # 创建启动文件kube-controller-manager
  88. vi /usr/lib/systemd/system/kube-controller-manager.service
  89. [Unit]
  90. Description=Kubernetes Controller Manager
  91. Documentation=https://github.com/kubernetes/kubernetes
  92. [Service]
  93. LimitNOFILE=1024000
  94. LimitNPROC=1024000
  95. LimitCORE=infinity
  96. LimitMEMLOCK=infinity
  97. EnvironmentFile=-/apps/kubernetes/conf/kube-controller-manager
  98. ExecStart=/apps/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
  99. Restart=on-failure
  100. RestartSec=5
  101. User=k8s
  102. [Install]
  103. WantedBy=multi-user.target
  104. # 给新创建文件 k8s 用户权限
  105. chown -R k8s.k8s /apps/kubernetes
  106. # 启动kube-controller-manager
  107. systemctl start kube-controller-manager
  108. # 设置开机启动
  109. systemctl enable kube-controller-manager
  110. # 验证状态
  111. [root@]~]#kubectl get cs
  112. NAME STATUS MESSAGE ERROR
  113. scheduler Healthy ok
  114. controller-manager Healthy ok
  115. etcd-0 Healthy {"health":"true"}
  116. # 配置 kube-controller-manager,kubelet 、kube-scheduler 访问kube-api 用户授权
  117. # 授予 kubernetes API 的权限
  118. kubectl create clusterrolebinding controller-node-clusterrolebing --clusterrole=system:kube-controller-manager --user=system:kube-controller-manager
  119. kubectl create clusterrolebinding scheduler-node-clusterrolebing --clusterrole=system:kube-scheduler --user=system:kube-scheduler
  120. kubectl create clusterrolebinding controller-manager:system:auth-delegator --user system:kube-controller-manager --clusterrole system:auth-delegator
  121. # 授予 kubernetes 证书访问 kubelet API 的权限
  122. kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
  123. kubectl create clusterrolebinding kubelet-node-clusterbinding --clusterrole=system:node --group=system:nodes
  124. kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes

docker 部署

  1. # 远程服务器节点操作
  2. # 使用阿里源
  3. cat > /etc/yum.repos.d/docker-ce.repo << EOF
  4. [docker-ce-stable]
  5. name=Docker CE Stable - \$basearch
  6. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/stable
  7. enabled=1
  8. gpgcheck=1
  9. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  10. [docker-ce-stable-debuginfo]
  11. name=Docker CE Stable - Debuginfo \$basearch
  12. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/stable
  13. enabled=0
  14. gpgcheck=1
  15. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  16. [docker-ce-stable-source]
  17. name=Docker CE Stable - Sources
  18. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/stable
  19. enabled=0
  20. gpgcheck=1
  21. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  22. [docker-ce-edge]
  23. name=Docker CE Edge - \$basearch
  24. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/edge
  25. enabled=0
  26. gpgcheck=1
  27. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  28. [docker-ce-edge-debuginfo]
  29. name=Docker CE Edge - Debuginfo \$basearch
  30. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/edge
  31. enabled=0
  32. gpgcheck=1
  33. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  34. [docker-ce-edge-source]
  35. name=Docker CE Edge - Sources
  36. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/edge
  37. enabled=0
  38. gpgcheck=1
  39. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  40. [docker-ce-test]
  41. name=Docker CE Test - \$basearch
  42. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/test
  43. enabled=0
  44. gpgcheck=1
  45. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  46. [docker-ce-test-debuginfo]
  47. name=Docker CE Test - Debuginfo \$basearch
  48. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/test
  49. enabled=0
  50. gpgcheck=1
  51. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  52. [docker-ce-test-source]
  53. name=Docker CE Test - Sources
  54. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/test
  55. enabled=0
  56. gpgcheck=1
  57. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  58. [docker-ce-nightly]
  59. name=Docker CE Nightly - \$basearch
  60. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/nightly
  61. enabled=0
  62. gpgcheck=1
  63. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  64. [docker-ce-nightly-debuginfo]
  65. name=Docker CE Nightly - Debuginfo \$basearch
  66. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/nightly
  67. enabled=0
  68. gpgcheck=1
  69. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  70. [docker-ce-nightly-source]
  71. name=Docker CE Nightly - Sources
  72. baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/nightly
  73. enabled=0
  74. gpgcheck=1
  75. gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
  76. EOF
  77. # 安装docker依赖
  78. yum install -y python-pip python-devel yum-utils device-mapper-persistent-data lvm2
  79. # 安装docker
  80. yum install -y docker-ce
  81. # 修改docker 启动配置
  82. vi /lib/systemd/system/docker.service
  83. ExecStart= 修改成
  84. ExecStart=/usr/bin/dockerd -H fd:// --graph /apps/docker -H unix:///var/run/docker.sock --max-concurrent-downloads=20 --log-opt max-size=200M --log-opt max-file=10 --default-ulimit nofile=1024000 --default-ulimit nproc=1024000
  85. # reload service 配置
  86. systemctl daemon-reload
  87. # 重启docker
  88. systemctl restart docker
  89. # 设置开机启动
  90. systemctl enable docker

安装kubelet 依赖

  1. # 远程服务器节点操作
  2. cni 插件安装
  3. mkdir -p /apps/cni/bin
  4. cd /apps/cni/bin
  5. wget https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz
  6. tar -xvf cni-plugins-linux-amd64-v0.8.2.tgz
  7. rm -f cni-plugins-linux-amd64-v0.8.2.tgz
  8. # 创建/etc/cni/net.d 目录
  9. mkdir -p /etc/cni/net.d
  10. vi /etc/cni/net.d/10-kuberouter.conflist
  11. {
  12. "cniVersion":"0.3.0",
  13. "name":"mynet",
  14. "plugins":[
  15. {
  16. "name":"kubernetes",
  17. "type":"bridge",
  18. "bridge":"kube-bridge",
  19. "isDefaultGateway":true,
  20. "ipam":{
  21. "type":"host-local"
  22. }
  23. },
  24. {
  25. "type":"portmap",
  26. "capabilities":{
  27. "snat":true,
  28. "portMappings":true
  29. }
  30. }
  31. ]
  32. }
  33. # lxcfs 安装
  34. yum install -y git automake libtool fuse-devel
  35. git clone https://github.com/lxc/lxcfs
  36. cd lxcfs/
  37. ./bootstrap.sh
  38. ./configure
  39. make
  40. make install
  41. # 创建目录
  42. mkdir -p /var/lib/lxcfs/
  43. # 配置 lxcfs 启动文件
  44. vi /usr/lib/systemd/system/lxcfs.service
  45. [Unit]
  46. Description=FUSE filesystem for LXC
  47. ConditionVirtualization=!container
  48. Before=lxc.service
  49. Documentation=man:lxcfs(1)
  50. [Service]
  51. ExecStart=/usr/local/bin/lxcfs /var/lib/lxcfs/
  52. KillMode=process
  53. Restart=on-failure
  54. ExecStopPost=-/bin/fusermount -u /var/lib/lxcfs
  55. Delegate=yes
  56. [Install]
  57. WantedBy=multi-user.target
  58. # 启动lxcfs
  59. systemctl start lxcfs
  60. # 设置开机启动
  61. systemctl enable lxcfs
  62. # 安装 kubelet 依赖
  63. yum install -y epel-release
  64. yum install -y yum-utils ipvsadm telnet wget net-tools conntrack ipset jq iptables curl sysstat libseccomp socat nfs-utils fuse fuse-devel

kubelet 部署

  1. # win on Ubuntu 操作
  2. # 生成 bootstrap Token
  3. # Bootstrap Token 生成
  4. echo "$(head -c 6 /dev/urandom | md5sum | head -c 6)"."$(head -c 16 /dev/urandom | md5sum | head -c 16)"
  5. 9dad00.2ac445bf1cc5e9c2
  6. vi bootstrap.secret.yaml
  7. apiVersion: v1
  8. kind: Secret
  9. metadata:
  10. # Name MUST be of form "bootstrap-token-<token id>"
  11. name: bootstrap-token-9dad00
  12. namespace: kube-system
  13. # Type MUST be 'bootstrap.kubernetes.io/token'
  14. type: bootstrap.kubernetes.io/token
  15. stringData:
  16. # Human readable description. Optional.
  17. description: "The default bootstrap token generated by 'kubelet '."
  18. # Token ID and secret. Required.
  19. token-id: 9dad00
  20. token-secret: 2ac445bf1cc5e9c2
  21. # Allowed usages.
  22. usage-bootstrap-authentication: "true"
  23. usage-bootstrap-signing: "true"
  24. # Extra groups to authenticate the token as. Must start with "system:bootstrappers:"
  25. auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress
  26. ### 创建k8s资源
  27. kubectl create -f bootstrap.secret.yaml
  28. ### 创建bootstrap.clusterrole.yaml
  29. vi bootstrap.clusterrole.yaml
  30. # A ClusterRole which instructs the CSR approver to approve a node requesting a
  31. # serving cert matching its client cert.
  32. kind: ClusterRole
  33. apiVersion: rbac.authorization.k8s.io/v1
  34. metadata:
  35. name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
  36. rules:
  37. - apiGroups: ["certificates.k8s.io"]
  38. resources: ["certificatesigningrequests/selfnodeserver"]
  39. verbs: ["create"]
  40. kubectl create -f bootstrap.clusterrole.yaml
  41. ### 创建 apiserver-to-kubelet.yaml
  42. vi apiserver-to-kubelet.yaml
  43. apiVersion: rbac.authorization.k8s.io/v1
  44. kind: ClusterRole
  45. metadata:
  46. annotations:
  47. rbac.authorization.kubernetes.io/autoupdate: "true"
  48. labels:
  49. kubernetes.io/bootstrapping: rbac-defaults
  50. name: system:kubernetes-to-kubelet
  51. rules:
  52. - apiGroups:
  53. - ""
  54. resources:
  55. - nodes/proxy
  56. - nodes/stats
  57. - nodes/log
  58. - nodes/spec
  59. - nodes/metrics
  60. verbs:
  61. - "*"
  62. ---
  63. apiVersion: rbac.authorization.k8s.io/v1
  64. kind: ClusterRoleBinding
  65. metadata:
  66. name: system:kubernetes
  67. namespace: ""
  68. roleRef:
  69. apiGroup: rbac.authorization.k8s.io
  70. kind: ClusterRole
  71. name: system:kubernetes-to-kubelet
  72. subjects:
  73. - apiGroup: rbac.authorization.k8s.io
  74. kind: User
  75. name: kubernetes
  76. kubectl create -f apiserver-to-kubelet.yaml
  77. ### 查看创建的token
  78. kubeadm token list
  79. # 允许 system:bootstrappers 组用户创建 CSR 请求
  80. kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
  81. # 自动批准 system:bootstrappers 组用户 TLS bootstrapping 首次申请证书的 CSR 请求
  82. kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --group=system:bootstrappers
  83. # 自动批准 system:nodes 组用户更新 kubelet 自身与 apiserver 通讯证书的 CSR 请求
  84. kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes
  85. # 自动批准 system:nodes 组用户更新 kubelet 10250 api 端口证书的 CSR 请求
  86. kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes
  87. # 创建bootstrap.kubeconfig
  88. # 设置集群参数
  89. kubectl config set-cluster kubernetes \
  90. --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  91. --embed-certs=true \
  92. --server=${KUBE_APISERVER} \
  93. --kubeconfig=bootstrap.kubeconfig
  94. # 设置客户端认证参数
  95. kubectl config set-credentials system:bootstrap:9dad00 \
  96. --token=9dad00.2ac445bf1cc5e9c2 \
  97. --kubeconfig=bootstrap.kubeconfig
  98. # 设置上下文参数
  99. kubectl config set-context default \
  100. --cluster=kubernetes \
  101. --user=system:bootstrap:9dad00 \
  102. --kubeconfig=bootstrap.kubeconfig
  103. # 设置默认上下文
  104. kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
  105. # 分发 bootstrap.kubeconfig 到远程节点
  106. scp bootstrap.kubeconfig 192.168.30.32:/apps/kubernetes/conf
  107. # 创建kubelet 启动配置文件 远程节点 操作
  108. cd /apps/kubernetes/conf
  109. vi kubelet
  110. KUBELET_OPTS="--bootstrap-kubeconfig=/apps/kubernetes/conf/bootstrap.kubeconfig \
  111. --fail-swap-on=false \
  112. --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/apps/cni/bin \
  113. --kubeconfig=/apps/kubernetes/conf/kubelet.kubeconfig \
  114. --address=192.168.30.32 \
  115. --node-ip=192.168.30.32 \
  116. --hostname-override=master \
  117. --cluster-dns=10.66.0.2 \
  118. --cluster-domain=cluster.local \
  119. --authorization-mode=Webhook \
  120. --authentication-token-webhook=true \
  121. --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
  122. --rotate-certificates=true \
  123. --cgroup-driver=cgroupfs \
  124. --healthz-port=10248 \
  125. --healthz-bind-address=192.168.30.32 \
  126. --cert-dir=/apps/kubernetes/ssl \
  127. --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
  128. --node-labels=node-role.kubernetes.io/k8s-node=true \
  129. --serialize-image-pulls=false \
  130. --enforce-node-allocatable=pods,kube-reserved,system-reserved \
  131. --pod-manifest-path=/apps/work/kubernetes/manifests \
  132. --runtime-cgroups=/systemd/system.slice/kubelet.service \
  133. --kube-reserved-cgroup=/systemd/system.slice/kubelet.service \
  134. --system-reserved-cgroup=/systemd/system.slice \
  135. --root-dir=/apps/work/kubernetes/kubelet \
  136. --log-dir=/apps/kubernetes/log \
  137. --alsologtostderr=true \
  138. --logtostderr=false \
  139. --anonymous-auth=true \
  140. --image-gc-high-threshold=70 \
  141. --image-gc-low-threshold=50 \
  142. --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \
  143. --system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \
  144. --eviction-hard=memory.available<500Mi,nodefs.available<10% \
  146. --sync-frequency=30s \
  147. --resolv-conf=/etc/resolv.conf \
  148. --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
  149. --image-pull-progress-deadline=30s \
  150. --v=2 \
  151. --event-burst=30 \
  152. --event-qps=15 \
  153. --kube-api-burst=30 \
  154. --kube-api-qps=15 \
  155. --max-pods=200 \
  156. --pods-per-core=10 \
  157. --read-only-port=0 \
  158. --allowed-unsafe-sysctls 'kernel.msg*,kernel.shm*,kernel.sem,fs.mqueue.*,net.*' \
  159. --volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume"
  160. # 创建 kubelet 服务文件
  161. vi /usr/lib/systemd/system/kubelet.service
  162. [Unit]
  163. Description=Kubernetes Kubelet
  164. After=docker.service
  165. Requires=docker.service
  166. [Service]
  167. LimitNOFILE=1024000
  168. LimitNPROC=1024000
  169. LimitCORE=infinity
  170. LimitMEMLOCK=infinity
  171. EnvironmentFile=-/apps/kubernetes/conf/kubelet
  172. ExecStart=/apps/kubernetes/bin/kubelet $KUBELET_OPTS
  173. Restart=on-failure
  174. KillMode=process
  175. [Install]
  176. WantedBy=multi-user.target
  177. # 启动 kubelet
  178. systemctl start kubelet
  179. # 设置开机启动
  180. systemctl enable kubelet
  181. #查看证书是否签发
  182. cd /apps/kubernetes/ssl/
  183. [root@master ssl]# ll
  184. total 12
  185. drwxr-xr-x 2 k8s k8s 75 Jul 4 15:06 etcd
  186. drwxr-xr-x 2 k8s k8s 310 Sep 3 11:18 k8s
  187. -rw------- 1 k8s k8s 1277 Sep 3 10:25 kubelet-client-2019-09-03-10-25-10.pem
  188. lrwxrwxrwx 1 k8s k8s 59 Sep 3 10:25 kubelet-client-current.pem -> /apps/kubernetes/ssl/kubelet-client-2019-09-03-10-25-10.pem
  189. -rw-r--r-- 1 k8s k8s 2153 Sep 3 10:25 kubelet.crt
  190. -rw------- 1 k8s k8s 1675 Sep 3 10:25 kubelet.key
  191. # 查看节点是否正常
  192. kubectl get node
  193. [root@]~]#kubectl get node
  194. NAME STATUS ROLES AGE VERSION
  195. master Ready k8s-node 27h v1.15.3

kube-router 部署

  1. # win on Ubuntu 操作
  2. # 创建kube-router 访问kube-apiserver 证书
  3. cat << EOF | tee /apps/work/k8s/cfssl/k8s/kube-router.json
  4. {
  5. "CN": "kube-router",
  6. "hosts": [""],
  7. "key": {
  8. "algo": "rsa",
  9. "size": 2048
  10. },
  11. "names": [
  12. {
  13. "C": "CN",
  14. "ST": "GuangDong",
  15. "L": "GuangZhou",
  16. "O": "system:masters",
  17. "OU": "Kubernetes-manual"
  18. }
  19. ]
  20. }
  21. EOF
  22. ## 生成 kube-router 证书和私钥
  23. cfssl gencert \
  24. -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  25. -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
  26. -config=/apps/work/k8s/cfssl/ca-config.json \
  27. -profile=kubernetes \
  28. /apps/work/k8s/cfssl/k8s/kube-router.json | \
  29. cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/kube-router
  30. # 设置集群参数
  31. kubectl config set-cluster kubernetes \
  32. --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  33. --embed-certs=true \
  34. --server=${KUBE_APISERVER} \
  35. --kubeconfig=kubeconfig.conf
  36. # 设置客户端认证参数
  37. kubectl config set-credentials kube-router \
  38. --client-certificate=/apps/work/k8s/cfssl/pki/k8s/kube-router.pem \
  39. --client-key=/apps/work/k8s/cfssl/pki/k8s/kube-router-key.pem \
  40. --embed-certs=true \
  41. --kubeconfig=kubeconfig.conf
  42. # 设置上下文参数
  43. kubectl config set-context default \
  44. --cluster=kubernetes \
  45. --user=kube-router \
  46. --kubeconfig=kubeconfig.conf
  47. # 设置默认上下文
  48. kubectl config use-context default --kubeconfig=kubeconfig.conf
  49. # 创建kube-router configmap
  50. kubectl create configmap "kube-proxy" --from-file=kubeconfig.conf -n kube-system
  51. # 创建kubeadm-kuberouter-all-features-hostport.yaml
  52. vi kubeadm-kuberouter-all-features-hostport.yaml
  53. apiVersion: v1
  54. kind: ConfigMap
  55. metadata:
  56. name: kube-router-cfg
  57. namespace: kube-system
  58. labels:
  59. tier: node
  60. k8s-app: kube-router
  61. data:
  62. cni-conf.json: |
  63. {
  64. "cniVersion":"0.3.0",
  65. "name":"mynet",
  66. "plugins":[
  67. {
  68. "name":"kubernetes",
  69. "type":"bridge",
  70. "bridge":"kube-bridge",
  71. "isDefaultGateway":true,
  72. "ipam":{
  73. "type":"host-local"
  74. }
  75. },
  76. {
  77. "type":"portmap",
  78. "capabilities":{
  79. "snat":true,
  80. "portMappings":true
  81. }
  82. }
  83. ]
  84. }
  85. ---
  86. apiVersion: apps/v1
  87. kind: DaemonSet
  88. metadata:
  89. labels:
  90. k8s-app: kube-router
  91. tier: node
  92. name: kube-router
  93. namespace: kube-system
  94. spec:
  95. selector:
  96. matchLabels:
  97. k8s-app: kube-router
  98. template:
  99. metadata:
  100. labels:
  101. k8s-app: kube-router
  102. tier: node
  103. annotations:
  104. scheduler.alpha.kubernetes.io/critical-pod: ''
  105. spec:
  106. serviceAccountName: kube-router
  107. serviceAccount: kube-router
  108. containers:
  109. - name: kube-router
  110. image: docker.io/cloudnativelabs/kube-router
  111. imagePullPolicy: Always
  112. args:
  113. - --run-router=true
  114. - --run-firewall=true
  115. - --run-service-proxy=true
  116. - --advertise-cluster-ip=true
  117. - --advertise-loadbalancer-ip=true
  118. - --advertise-pod-cidr=true
  119. - --advertise-external-ip=true
  120. - --cluster-asn=64512
  121. - --metrics-path=/metrics
  122. - --metrics-port=20241
  123. - --enable-cni=true
  124. - --enable-ibgp=true
  125. - --enable-overlay=true
  126. - --nodeport-bindon-all-ip=true
  127. - --nodes-full-mesh=true
  128. - --enable-pod-egress=true
  129. - --cluster-cidr=10.67.0.0/16
  130. - --v=2
  131. - --kubeconfig=/var/lib/kube-router/kubeconfig
  132. env:
  133. - name: NODE_NAME
  134. valueFrom:
  135. fieldRef:
  136. fieldPath: spec.nodeName
  137. - name: KUBE_ROUTER_CNI_CONF_FILE
  138. value: /etc/cni/net.d/10-kuberouter.conflist
  139. livenessProbe:
  140. httpGet:
  141. path: /healthz
  142. port: 20244
  143. initialDelaySeconds: 10
  144. periodSeconds: 3
  145. resources:
  146. requests:
  147. cpu: 250m
  148. memory: 250Mi
  149. securityContext:
  150. privileged: true
  151. volumeMounts:
  152. - name: lib-modules
  153. mountPath: /lib/modules
  154. readOnly: true
  155. - name: cni-conf-dir
  156. mountPath: /etc/cni/net.d
  157. - name: kubeconfig
  158. mountPath: /var/lib/kube-router
  159. readOnly: true
  160. initContainers:
  161. - name: install-cni
  162. image: busybox
  163. imagePullPolicy: Always
  164. command:
  165. - /bin/sh
  166. - -c
  167. - set -e -x;
  168. if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
  169. if [ -f /etc/cni/net.d/*.conf ]; then
  170. rm -f /etc/cni/net.d/*.conf;
  171. fi;
  172. TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
  173. cp /etc/kube-router/cni-conf.json ${TMP};
  174. mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
  175. fi
  176. volumeMounts:
  177. - name: cni-conf-dir
  178. mountPath: /etc/cni/net.d
  179. - name: kube-router-cfg
  180. mountPath: /etc/kube-router
  181. hostNetwork: true
  182. tolerations:
  183. - key: CriticalAddonsOnly
  184. operator: Exists
  185. - effect: NoSchedule
  186. key: node-role.kubernetes.io/master
  187. operator: Exists
  188. - effect: NoSchedule
  189. key: node.kubernetes.io/not-ready
  190. operator: Exists
  191. - effect: NoSchedule
  192. key: node-role.kubernetes.io/ingress
  193. operator: Equal
  194. volumes:
  195. - name: lib-modules
  196. hostPath:
  197. path: /lib/modules
  198. - name: cni-conf-dir
  199. hostPath:
  200. path: /etc/cni/net.d
  201. - name: kube-router-cfg
  202. configMap:
  203. name: kube-router-cfg
  204. - name: kubeconfig
  205. configMap:
  206. name: kube-proxy
  207. items:
  208. - key: kubeconfig.conf
  209. path: kubeconfig
  210. ---
  211. apiVersion: v1
  212. kind: ServiceAccount
  213. metadata:
  214. name: kube-router
  215. namespace: kube-system
  216. ---
  217. kind: ClusterRole
  218. apiVersion: rbac.authorization.k8s.io/v1beta1
  219. metadata:
  220. name: kube-router
  221. namespace: kube-system
  222. rules:
  223. - apiGroups:
  224. - ""
  225. resources:
  226. - namespaces
  227. - pods
  228. - services
  229. - nodes
  230. - endpoints
  231. verbs:
  232. - list
  233. - get
  234. - watch
  235. - apiGroups:
  236. - "networking.k8s.io"
  237. resources:
  238. - networkpolicies
  239. verbs:
  240. - list
  241. - get
  242. - watch
  243. - apiGroups:
  244. - extensions
  245. resources:
  246. - networkpolicies
  247. verbs:
  248. - get
  249. - list
  250. - watch
  251. ---
  252. kind: ClusterRoleBinding
  253. apiVersion: rbac.authorization.k8s.io/v1beta1
  254. metadata:
  255. name: kube-router
  256. roleRef:
  257. apiGroup: rbac.authorization.k8s.io
  258. kind: ClusterRole
  259. name: kube-router
  260. subjects:
  261. - kind: ServiceAccount
  262. name: kube-router
  263. namespace: kube-system
  264. # 创建 kube-router 服务
  265. kubectl apply -f kubeadm-kuberouter-all-features-hostport.yaml
  266. # 查看服务是否成功创建
  267. [root@]~]#kubectl get pod -A | grep kube-router
  268. kube-system kube-router-5tmgw 1/1 Running 0 21h
  269. # 进入192.168.30.32
  270. cat /etc/cni/net.d/10-kuberouter.conflist
  271. [root@master ssl]# cat /etc/cni/net.d/10-kuberouter.conflist
  272. {"cniVersion":"0.3.0","name":"mynet","plugins":[{"bridge":"kube-bridge","ipam":{"subnet":"10.67.0.0/24","type":"host-local"},"isDefaultGateway":true,"name":"kubernetes","type":"bridge"},{"capabilities":{"portMappings":true,"snat":true},"type":"portmap"}]}
  273. # 已经获取到ip段
  274. ip a| grep kube
  275. [root@master ssl]# ip a| grep kube
  276. 4: kube-bridge: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
  277. inet 10.67.0.1/24 brd 10.67.0.255 scope global kube-bridge
  278. 6: kube-dummy-if: <BROADCAST,NOARP,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default
  279. inet 10.66.0.1/32 brd 10.66.0.1 scope link kube-dummy-if
  280. inet 10.66.0.2/32 brd 10.66.0.2 scope link kube-dummy-if
  281. inet 10.66.91.125/32 brd 10.66.91.125 scope link kube-dummy-if
  282. inet 10.66.86.10/32 brd 10.66.86.10 scope link kube-dummy-if
  283. inet 10.66.52.216/32 brd 10.66.52.216 scope link kube-dummy-if
  284. # kube-router 部署正常

coredns 部署

  1. # win on Ubuntu 操作
  2. vi coredns.yaml
  3. # __MACHINE_GENERATED_WARNING__
  4. apiVersion: v1
  5. kind: ServiceAccount
  6. metadata:
  7. name: coredns
  8. namespace: kube-system
  9. labels:
  10. kubernetes.io/cluster-service: "true"
  11. addonmanager.kubernetes.io/mode: Reconcile
  12. ---
  13. apiVersion: rbac.authorization.k8s.io/v1
  14. kind: ClusterRole
  15. metadata:
  16. labels:
  17. kubernetes.io/bootstrapping: rbac-defaults
  18. addonmanager.kubernetes.io/mode: Reconcile
  19. name: system:coredns
  20. rules:
  21. - apiGroups:
  22. - ""
  23. resources:
  24. - endpoints
  25. - services
  26. - pods
  27. - namespaces
  28. verbs:
  29. - list
  30. - watch
  31. - apiGroups:
  32. - ""
  33. resources:
  34. - nodes
  35. verbs:
  36. - get
  37. ---
  38. apiVersion: rbac.authorization.k8s.io/v1
  39. kind: ClusterRoleBinding
  40. metadata:
  41. annotations:
  42. rbac.authorization.kubernetes.io/autoupdate: "true"
  43. labels:
  44. kubernetes.io/bootstrapping: rbac-defaults
  45. addonmanager.kubernetes.io/mode: EnsureExists
  46. name: system:coredns
  47. roleRef:
  48. apiGroup: rbac.authorization.k8s.io
  49. kind: ClusterRole
  50. name: system:coredns
  51. subjects:
  52. - kind: ServiceAccount
  53. name: coredns
  54. namespace: kube-system
  55. ---
  56. apiVersion: v1
  57. kind: ConfigMap
  58. metadata:
  59. name: coredns
  60. namespace: kube-system
  61. labels:
  62. addonmanager.kubernetes.io/mode: EnsureExists
  63. data:
  64. Corefile: |
  65. .:53 {
  66. errors
  67. health
  68. kubernetes cluster.local in-addr.arpa ip6.arpa {
  69. pods insecure
  70. upstream /etc/resolv.conf
  71. fallthrough in-addr.arpa ip6.arpa
  72. }
  73. prometheus :9153
  74. forward . /etc/resolv.conf
  75. cache 30
  76. reload
  77. loadbalance
  78. }
  79. ---
  80. apiVersion: apps/v1
  81. kind: Deployment
  82. metadata:
  83. name: coredns
  84. namespace: kube-system
  85. labels:
  86. k8s-app: kube-dns
  87. kubernetes.io/cluster-service: "true"
  88. addonmanager.kubernetes.io/mode: Reconcile
  89. kubernetes.io/name: "CoreDNS"
  90. spec:
  91. # replicas: not specified here:
  92. # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  93. # 2. Default is 1.
  94. # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  95. strategy:
  96. type: RollingUpdate
  97. rollingUpdate:
  98. maxUnavailable: 1
  99. selector:
  100. matchLabels:
  101. k8s-app: kube-dns
  102. template:
  103. metadata:
  104. labels:
  105. k8s-app: kube-dns
  106. annotations:
  107. seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
  108. spec:
  109. priorityClassName: system-cluster-critical
  110. serviceAccountName: coredns
  111. tolerations:
  112. - key: "CriticalAddonsOnly"
  113. operator: "Exists"
  114. nodeSelector:
  115. beta.kubernetes.io/os: linux
  116. containers:
  117. - name: coredns
  118. image: coredns/coredns
  119. imagePullPolicy: Always
  120. resources:
  121. limits:
  122. memory: 170Mi
  123. requests:
  124. cpu: 100m
  125. memory: 70Mi
  126. args: [ "-conf", "/etc/coredns/Corefile" ]
  127. volumeMounts:
  128. - name: config-volume
  129. mountPath: /etc/coredns
  130. readOnly: true
  131. ports:
  132. - containerPort: 53
  133. name: dns
  134. protocol: UDP
  135. - containerPort: 53
  136. name: dns-tcp
  137. protocol: TCP
  138. - containerPort: 9153
  139. name: metrics
  140. protocol: TCP
  141. livenessProbe:
  142. httpGet:
  143. path: /health
  144. port: 8080
  145. scheme: HTTP
  146. initialDelaySeconds: 60
  147. timeoutSeconds: 5
  148. successThreshold: 1
  149. failureThreshold: 5
  150. readinessProbe:
  151. httpGet:
  152. path: /health
  153. port: 8080
  154. scheme: HTTP
  155. securityContext:
  156. allowPrivilegeEscalation: false
  157. capabilities:
  158. add:
  159. - NET_BIND_SERVICE
  160. drop:
  161. - all
  162. readOnlyRootFilesystem: true
  163. dnsPolicy: Default
  164. volumes:
  165. - name: config-volume
  166. configMap:
  167. name: coredns
  168. items:
  169. - key: Corefile
  170. path: Corefile
  171. ---
  172. apiVersion: v1
  173. kind: Service
  174. metadata:
  175. name: kube-dns
  176. namespace: kube-system
  177. annotations:
  178. prometheus.io/port: "9153"
  179. prometheus.io/scrape: "true"
  180. labels:
  181. k8s-app: kube-dns
  182. kubernetes.io/cluster-service: "true"
  183. addonmanager.kubernetes.io/mode: Reconcile
  184. kubernetes.io/name: "CoreDNS"
  185. spec:
  186. selector:
  187. k8s-app: kube-dns
  188. clusterIP: 10.66.0.2
  189. ports:
  190. - name: dns
  191. port: 53
  192. protocol: UDP
  193. - name: dns-tcp
  194. port: 53
  195. protocol: TCP
  196. - name: metrics
  197. port: 9153
  198. protocol: TCP
  199. # 创建 CoreDNS dns 服务
  200. kubectl apply -f coredns.yaml
  201. # 验证服务
  202. [root@]~]#kubectl get all -A | grep coredns
  203. kube-system pod/coredns-597b77445b-fhxvr 1/1 Running 0 27h
  204. kube-system deployment.apps/coredns 1/1 1 1 27h
  205. kube-system replicaset.apps/coredns-597b77445b 1 1 1 27h
  206. dig @10.66.0.2 www.baidu.com
  207. [root@master ssl]# dig @10.66.0.2 www.baidu.com
  208. ; <<>> DiG 9.9.4-RedHat-9.9.4-74.el7_6.1 <<>> @10.66.0.2 www.baidu.com
  209. ; (1 server found)
  210. ;; global options: +cmd
  211. ;; Got answer:
  212. ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 40347
  213. ;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 13, ADDITIONAL: 27
  214. ;; OPT PSEUDOSECTION:
  215. ; EDNS: version: 0, flags:; udp: 4096
  216. ;; QUESTION SECTION:
  217. ;www.baidu.com. IN A
  218. ;; ANSWER SECTION:
  219. www.baidu.com. 30 IN CNAME www.a.shifen.com.
  220. www.a.shifen.com. 30 IN A 14.215.177.38
  221. www.a.shifen.com. 30 IN A 14.215.177.39
  222. ;; AUTHORITY SECTION:
  223. com. 30 IN NS h.gtld-servers.net.
  224. com. 30 IN NS m.gtld-servers.net.
  225. com. 30 IN NS g.gtld-servers.net.
  226. com. 30 IN NS d.gtld-servers.net.
  227. com. 30 IN NS a.gtld-servers.net.
  228. com. 30 IN NS j.gtld-servers.net.
  229. com. 30 IN NS c.gtld-servers.net.
  230. com. 30 IN NS l.gtld-servers.net.
  231. com. 30 IN NS b.gtld-servers.net.
  232. com. 30 IN NS f.gtld-servers.net.
  233. com. 30 IN NS k.gtld-servers.net.
  234. com. 30 IN NS i.gtld-servers.net.
  235. com. 30 IN NS e.gtld-servers.net.
  236. ;; ADDITIONAL SECTION:
  237. e.gtld-servers.net. 30 IN AAAA 2001:502:1ca1::30
  238. a.gtld-servers.net. 30 IN A 192.5.6.30
  239. i.gtld-servers.net. 30 IN AAAA 2001:503:39c1::30
  240. c.gtld-servers.net. 30 IN A 192.26.92.30
  241. g.gtld-servers.net. 30 IN AAAA 2001:503:eea3::30
  242. m.gtld-servers.net. 30 IN A 192.55.83.30
  243. d.gtld-servers.net. 30 IN A 192.31.80.30
  244. a.gtld-servers.net. 30 IN AAAA 2001:503:a83e::2:30
  245. b.gtld-servers.net. 30 IN A 192.33.14.30
  246. b.gtld-servers.net. 30 IN AAAA 2001:503:231d::2:30
  247. i.gtld-servers.net. 30 IN A 192.43.172.30
  248. d.gtld-servers.net. 30 IN AAAA 2001:500:856e::30
  249. l.gtld-servers.net. 30 IN A 192.41.162.30
  250. h.gtld-servers.net. 30 IN AAAA 2001:502:8cc::30
  251. e.gtld-servers.net. 30 IN A 192.12.94.30
  252. l.gtld-servers.net. 30 IN AAAA 2001:500:d937::30
  253. k.gtld-servers.net. 30 IN AAAA 2001:503:d2d::30
  254. j.gtld-servers.net. 30 IN AAAA 2001:502:7094::30
  255. m.gtld-servers.net. 30 IN AAAA 2001:501:b1f9::30
  256. f.gtld-servers.net. 30 IN A 192.35.51.30
  257. g.gtld-servers.net. 30 IN A 192.42.93.30
  258. h.gtld-servers.net. 30 IN A 192.54.112.30
  259. j.gtld-servers.net. 30 IN A 192.48.79.30
  260. k.gtld-servers.net. 30 IN A 192.52.178.30
  261. c.gtld-servers.net. 30 IN AAAA 2001:503:83eb::30
  262. f.gtld-servers.net. 30 IN AAAA 2001:503:d414::30
  263. ;; Query time: 6 msec
  264. ;; SERVER: 10.66.0.2#53(10.66.0.2)
  265. ;; WHEN: Wed Sep 04 14:17:05 CST 2019
  266. ;; MSG SIZE rcvd: 897
  267. dig @10.66.0.2 kube-dns.kube-system.svc.cluster.local
  268. [root@master ssl]# dig @10.66.0.2 kube-dns.kube-system.svc.cluster.local
  269. ; <<>> DiG 9.9.4-RedHat-9.9.4-74.el7_6.1 <<>> @10.66.0.2 kube-dns.kube-system.svc.cluster.local
  270. ; (1 server found)
  271. ;; global options: +cmd
  272. ;; Got answer:
  273. ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 40471
  274. ;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
  275. ;; WARNING: recursion requested but not available
  276. ;; OPT PSEUDOSECTION:
  277. ; EDNS: version: 0, flags:; udp: 4096
  278. ;; QUESTION SECTION:
  279. ;kube-dns.kube-system.svc.cluster.local. IN A
  280. ;; ANSWER SECTION:
  281. kube-dns.kube-system.svc.cluster.local. 5 IN A 10.66.0.2
  282. ;; Query time: 1 msec
  283. ;; SERVER: 10.66.0.2#53(10.66.0.2)
  284. ;; WHEN: Wed Sep 04 14:19:13 CST 2019
  285. ;; MSG SIZE rcvd: 121
  286. 能够正常解析

traefik-https 部署

  1. # win on Ubuntu 操作
  2. #申请证书 请使用letsencrypt 申请免费 多域名证书
  3. #重命名证书名字
  4. tls.crt
  5. tls.key
  6. #创建 secret
  7. kubectl -n kube-system create secret generic tls-cert --from-file=tls.key --from-file=tls.crt
  8. # 创建traefik 配置
  9. vi traefik.toml
  10. defaultEntryPoints = ["http","https"]
  11. [entryPoints]
  12. [entryPoints.http]
  13. address = ":80"
  14. [entryPoints.http.redirect]
  entryPoint = "https"
  15. [entryPoints.https]
  16. address = ":443"
  17. [entryPoints.https.tls]
  18. [[entryPoints.https.tls.certificates]]
  19. certFile = "/certs/tls.crt"
  20. keyFile = "/certs/tls.key"
  21. # 生成 configmap
  22. kubectl create configmap traefik-conf --from-file=traefik.toml -n kube-system
  23. # 创建traefik-rbac
  24. vi traefik-rbac.yaml
  25. ---
  26. kind: ClusterRole
  27. apiVersion: rbac.authorization.k8s.io/v1beta1
  28. metadata:
  29. name: traefik
  30. namespace: kube-system
  31. rules:
  32. - apiGroups:
  33. - ""
  34. resources:
  35. - services
  36. - endpoints
  37. - secrets
  38. verbs:
  39. - get
  40. - list
  41. - watch
  42. - apiGroups:
  43. - extensions
  44. resources:
  45. - ingresses
  46. verbs:
  47. - get
  48. - list
  49. - watch
  50. - apiGroups:
  51. - extensions
  52. resources:
  53. - ingresses/status
  54. verbs:
  55. - update
  56. ---
  57. kind: ClusterRoleBinding
  58. apiVersion: rbac.authorization.k8s.io/v1beta1
  59. metadata:
  60. name: traefik
  61. roleRef:
  62. apiGroup: rbac.authorization.k8s.io
  63. kind: ClusterRole
  64. name: traefik
  65. subjects:
  66. - kind: ServiceAccount
  67. name: traefik
  68. namespace: kube-system
  69. # traefik-deployment-https
  70. vi traefik-deployment-https.yaml
  71. ---
  72. apiVersion: v1
  73. kind: ServiceAccount
  74. metadata:
  75. name: traefik
  76. namespace: kube-system
  77. ---
  78. kind: Deployment
  79. apiVersion: apps/v1
  80. metadata:
  81. name: traefik
  82. namespace: kube-system
  83. labels:
  84. k8s-app: traefik
  85. spec:
  86. replicas: 1
  87. selector:
  88. matchLabels:
  89. k8s-app: traefik
  90. template:
  91. metadata:
  92. labels:
  93. k8s-app: traefik
  94. name: traefik
  95. spec:
  96. serviceAccountName: traefik
  97. terminationGracePeriodSeconds: 60
  98. volumes:
  99. - name: ssl
  100. secret:
  101. secretName: tls-cert
  102. - name: config
  103. configMap:
  104. name: traefik-conf
  105. defaultMode: 0644
  106. items:
  107. - key: traefik.toml
  108. path: traefik.toml
  109. hostNetwork: true
  110. dnsPolicy: ClusterFirstWithHostNet
  111. containers:
  112. - image: traefik
  113. name: traefik
  114. imagePullPolicy: IfNotPresent
  115. volumeMounts:
  116. - mountPath: /certs
  117. name: "ssl"
  118. - mountPath: /etc/traefik.toml
  119. subPath: traefik.toml
  120. name: "config"
  121. ports:
  122. - name: http
  123. containerPort: 80
  124. hostPort: 80
  125. - name: https
  126. containerPort: 443
  127. hostPort: 443
  128. - name: admin
  129. containerPort: 8080
  130. args:
  131. - --api
  132. - --web
  133. - --api.dashboard
  134. - --logLevel=INFO
  135. - --web.metrics
  136. - --metrics.prometheus
  137. - --web.metrics.prometheus
  138. - --kubernetes
  139. - --traefiklog
  140. - --traefiklog.format=json
  141. - --accesslog
  142. - --accesslog.format=json
  143. - --accessLog.fields.headers.defaultMode=redact
  144. - --insecureskipverify=true
  145. - --configFile=/etc/traefik.toml
  146. # nodeSelector:
  147. # ingress: "yes"
  148. # tolerations:
  149. # - effect: NoSchedule
  150. # key: node-role.kubernetes.io/ingress
  151. # operator: Equal
  152. ---
  153. kind: Service
  154. apiVersion: v1
  155. metadata:
  156. labels:
  157. k8s-app: traefik
  158. name: traefik
  159. namespace: kube-system
  160. spec:
  161. selector:
  162. k8s-app: traefik
  163. clusterIP: None
  164. ports:
  165. - protocol: TCP
  166. port: 80
  167. name: http
  168. - protocol: TCP
  169. port: 443
  170. name: https
  171. - protocol: TCP
  172. port: 8080
  173. name: admin
  174. type: ClusterIP
  175. # 或者traefik-daemonset-https
  176. ---
  177. apiVersion: v1
  178. kind: ServiceAccount
  179. metadata:
  180. name: traefik
  181. namespace: kube-system
  182. ---
  183. kind: DaemonSet
  184. apiVersion: apps/v1
  185. metadata:
  186. name: traefik
  187. namespace: kube-system
  188. labels:
  189. k8s-app: traefik
  190. spec:
  191. selector:
  192. matchLabels:
  193. k8s-app: traefik
  194. template:
  195. metadata:
  196. labels:
  197. k8s-app: traefik
  198. name: traefik
  199. spec:
  200. serviceAccountName: traefik
  201. terminationGracePeriodSeconds: 60
  202. volumes:
  203. - name: ssl
  204. secret:
  205. secretName: tls-cert
  206. - name: config
  207. configMap:
  208. name: traefik-conf
  209. defaultMode: 0644
  210. items:
  211. - key: traefik.toml
  212. path: traefik.toml
  213. hostNetwork: true
  214. dnsPolicy: ClusterFirstWithHostNet
  215. containers:
  216. - image: traefik
  217. name: traefik
  218. imagePullPolicy: IfNotPresent
  219. volumeMounts:
  220. - mountPath: /certs
  221. name: "ssl"
  222. - mountPath: /etc/traefik.toml
  223. subPath: traefik.toml
  224. name: "config"
  225. ports:
  226. - name: http
  227. containerPort: 80
  228. hostPort: 80
  229. - name: https
  230. containerPort: 443
  231. hostPort: 443
  232. - name: admin
  233. containerPort: 8080
  234. securityContext:
  235. capabilities:
  236. drop:
  237. - ALL
  238. add:
  239. - NET_BIND_SERVICE
  240. args:
  241. - --api
  242. - --web
  243. - --api.dashboard
  244. - --logLevel=INFO
  245. - --web.metrics
  246. - --metrics.prometheus
  247. - --web.metrics.prometheus
  248. - --kubernetes
  249. - --traefiklog
  250. - --traefiklog.format=json
  251. - --accesslog
  252. - --accesslog.format=json
  253. - --accessLog.fields.headers.defaultMode=redact
  254. - --insecureskipverify=true
  255. - --configFile=/etc/traefik.toml
  256. nodeSelector:
  257. ingress: "yes"
  258. tolerations:
  259. - effect: NoSchedule
  260. key: node-role.kubernetes.io/ingress
  261. operator: Equal
  262. ---
  263. kind: Service
  264. apiVersion: v1
  265. metadata:
  266. labels:
  267. k8s-app: traefik
  268. name: traefik
  269. namespace: kube-system
  270. spec:
  271. selector:
  272. k8s-app: traefik
  273. clusterIP: None
  274. ports:
  275. - protocol: TCP
  276. port: 80
  277. name: http
  278. - protocol: TCP
  279. port: 443
  280. name: https
  281. - protocol: TCP
  282. port: 8080
  283. name: admin
  284. type: ClusterIP
  285. #创建 traefik-dashboard
  286. vi traefik-dashboard.yaml
  287. apiVersion: extensions/v1beta1
  288. kind: Ingress
  289. metadata:
  290. name: traefik-dashboard
  291. namespace: kube-system
  292. annotations:
  293. kubernetes.io/ingress.class: traefik
  294. traefik.ingress.kubernetes.io/frontend-entry-points: http,https
  295. spec:
  296. rules:
  297. - host: trae.xxx.com
  298. http:
  299. paths:
  300. - backend:
  301. serviceName: traefik
  302. servicePort: 8080
  303. tls:
  304. - secretName: tls-cert
  305. # 创建 服务器
  306. kubectl apply -f traefik-deployment-https.yaml
  307. kubectl apply -f traefik-rbac.yaml
  308. kubectl apply -f traefik-dashboard.yaml
  309. hosts 绑定 访问 trae.xxx.com 是否正常
  310. 能正常打开证明正常

kubernetes-dashboard 部署

  1. # win on Ubuntu 操作
  2. # 创建 kubernetes-dashboard 使用证书
  3. cat << EOF | tee /apps/work/k8s/cfssl/k8s/dashboard.json
  4. {
  5. "CN": "dashboard",
  6. "hosts": [""],
  7. "key": {
  8. "algo": "rsa",
  9. "size": 2048
  10. },
  11. "names": [
  12. {
  13. "C": "CN",
  14. "ST": "GuangDong",
  15. "L": "GuangZhou",
  16. "O": "cluster",
  17. "OU": "cluster"
  18. }
  19. ]
  20. }
  21. EOF
  22. ##### 生成kubernetes-dashboard 证书,当然如果有外部签发的证书也可以使用
  23. cfssl gencert \
  24. -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  25. -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
  26. -config=/apps/work/k8s/cfssl/ca-config.json \
  27. -profile=kubernetes \
  28. /apps/work/k8s/cfssl/k8s/dashboard.json | \
  29. cfssljson -bare ./dashboard
  30. # base64 加密
  31. cat dashboard.pem|base64 | tr -d '\n'
  32. cat dashboard-key.pem|base64 | tr -d '\n'
  33. # 做好记录
  34. # kubernetes-dashboard
  35. vi kubernetes-dashboard.yaml
  36. # Copyright 2017 The Kubernetes Authors.
  37. #
  38. # Licensed under the Apache License, Version 2.0 (the "License");
  39. # you may not use this file except in compliance with the License.
  40. # You may obtain a copy of the License at
  41. #
  42. # http://www.apache.org/licenses/LICENSE-2.0
  43. #
  44. # Unless required by applicable law or agreed to in writing, software
  45. # distributed under the License is distributed on an "AS IS" BASIS,
  46. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  47. # See the License for the specific language governing permissions and
  48. # limitations under the License.
  49. apiVersion: v1
  50. kind: Namespace
  51. metadata:
  52. name: kubernetes-dashboard
  53. ---
  54. apiVersion: v1
  55. kind: ServiceAccount
  56. metadata:
  57. labels:
  58. k8s-app: kubernetes-dashboard
  59. name: kubernetes-dashboard
  60. namespace: kubernetes-dashboard
  61. ---
  62. kind: Service
  63. apiVersion: v1
  64. metadata:
  65. labels:
  66. k8s-app: kubernetes-dashboard
  67. name: kubernetes-dashboard
  68. namespace: kubernetes-dashboard
  69. annotations:
  70. kubernetes.io/ingress.class: traefik
  71. spec:
  72. ports:
  73. - port: 443
  74. targetPort: 8443
  75. selector:
  76. k8s-app: kubernetes-dashboard
  77. ---
  78. apiVersion: v1
  79. kind: Secret
  80. metadata:
  81. labels:
  82. k8s-app: kubernetes-dashboard
  83. name: kubernetes-dashboard-certs
  84. namespace: kubernetes-dashboard
  85. type: Opaque
  86. data:
  87. dashboard.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeFlFV0MxbGlqcnFzNW5vcHBxTXF0YzZSY0pnSWFJSGhGemZZUWhRQm5pK0Vjam8vCkRTUkYvY3BUOFlkTTg2MVpEV1lSN1FEelFLNmJUTmRLWXJJYmpVWHJpRFVFU01EUW13Y1VteTMzWjFpeXR6K0wKUUVmTVFvWVNReGVIY2RqUHp3bUhFS0todk9vNmxQTHNFWkMwQ3ZCamw2VHlERjhuSDEzby9kRlRVbGJhWUlGaQpPeGVIWkxMMTZKbmNLK3RVaW9ncjdLekFKMUkxTjdwOVQ1blZ5YU9PbWNCVEFnU3RJM0ZwSzdMZG1zaVU0ZEZ0CkpSSFZ0eTh5Y3dCSU9wWnhqV29mM2ROVkRrVUFsYjVtV2psU0RaQ2lhYmFYQi91NmJ0R0k3RlY2cENaUzdDVG4KeWlpUFlFSXRPSGRCT0VycGpKZWQ0bHQ5K2MvNDE3UTRIaiswdndJREFRQUJBb0lCQVFDK1daSWdjQTZRRnhScQpzVlNST1BNQjlFdXlJNlQrN0NZL2xXQUZGM2tUdHlKRVlTVEJpck0yVFprbjBFbjNGSndlVU1CNEZwRmJScTJBCm1vSWpxeHJveG5taGRjOWlPd3NTVHZtcU1kd2ZLNXBiQ0pBeDdNRE5ZS0FiTDRNbjAxazlaaVpaZnhTNG1WcksKa1hHNTRDZlYzeWR0VU5qRDJiVkFBdWQ2TVJQSDV5QWJTVktsMG9ONkRCaFV4MlYyWEo0WnRUVHE0b3R6VGYxZwp3SjNJeVFjSXl3czE2V3dkeHpuYStqVmpOYU5OQ3ZCT1BMbm9TeXZBQXZGRG9UYmUrMG1tcnZLVmlSeDBDT1FzCkUwNjFtNHY2eUExL3locndkT1BDYXN6SkpjWlYzOThJTzFKb2QxUHk3OU9aT1FpY1FEOGhwQmxqb0FSQ2JlY3QKRFFPcG5CR0JBb0dCQVBhYlJSSGpPTkxIQ25JZWlFQU1EYXNwQXo2RGxRNkQvdWNNdzROdkVPRVNVa3dvQ0p4cApwK1hJeVVzT1B1d2swTzVCcHJRcHZjdGYyWXlLZTFtR25iVUpmUVNWNGpLdWpqb0M0OWhOWk9lSE8zd0xMcnNXCkl1SU1Qeko0TjhxSzl0dUpDQ3BVYUZFVzRiN1R2OGsyK1pJWHJwN3hzNklDd01EUnpTaW9wY0hCQW9HQkFNMEgKQVl1bmdzY3hTM2JnZ05idU5sQ3lIOHBLZFVPbi95cU9IQUdYcG9vZmJUbXJiUUlWN0ZOVSszUTlYc2ErVVE0QwpUbVdFbzhabVhrL3lIV2FDVWxpRkN0ckRhTzNUZVhvb2pia1JyaDcxakFXN0pjVDRVZ1ZwcG1RakFVUW8vOWtVCmxHMUNpOTFZZy94dlV5dHlYM1BnZHJ6SnU2aWNsM1pVZ1h3dzNoWi9Bb0dBZENmY2w3bFVLWXZSTXNHSTRjb0wKb2lRMlAvclFlYjdZa05IbFFZSk9EQVdLT0E3ZlIzVkl2U1lmRWpoS2tRWWlWeWNiTTE4NTQ1SnBNUmFGVlR6ZwpDY2JIV1NLVUlkVXdic2l2czFGNUJza2V6cVdoeEVOLytNTlYvUnE5QkswQjY1UVhBWUV5aFlkbW0zQzN0RG90CndZOWdFOE83SGNONE1ScGhMUmFLeE1FQ2dZRUFoS2E5eHorUUM1VEhRSmlzZzJNSVhWbUIyLzRrdEt0akdvTnIKZDFSSStpQ3ZLSnJUSW9CUXNQSFE1em8xc2R5ODBKV0paNEZUL1MrS1lhdENmbXBmSU1xalpUcjlEcksrYTkwRgpKUEpkZDhaaTIrcGoyM2JXaW8zNmk5dGlIRmx5ZjE4alVUVzNESFVTb0NiZTVzTlBJc2ZkeXZPeXFMcjMvQ1ZjCnlaOU1jYjBDZ1lBMVp2RVM3bU42Nm1
0T2JpSlR3a3hhaTVvS2tHbDdHTDJkZXJFUmxsc1YrNWRCSVY4dG5DTnAKT2tjMFlMbHV2TEg4cG4zd2VCNzg5dUFCQjNXYmNKcHg0L2NIRm9oZDNhdlR0RThRVjJod0tNS2RKQVBvTHNoMgprK2lEUWd1dmFxSzNmL1RYUW43bWU3dWFqSDk3SXZldXJtWWsvVmRJY0dicnd1SVRzd0FEYWc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
  88. dashboard.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ5ekNDQXQrZ0F3SUJBZ0lVUWRIVXdKS1JYc1ZRb2VYS1JDTjd0eVcwWU04d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2JqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUk13CkVRWURWUVFERXdwcmRXSmxjbTVsZEdWek1CNFhEVEU1TURjd05ERXhNVE13TUZvWERUSTVNRGN3TVRFeE1UTXcKTUZvd2JURUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUkl3CkVBWURWUVFERXdsa1lYTm9ZbTloY21Rd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUIKQVFERmdSWUxXV0tPdXF6bWVpbW1veXExenBGd21BaG9nZUVYTjloQ0ZBR2VMNFJ5T2o4TkpFWDl5bFB4aDB6egpyVmtOWmhIdEFQTkFycHRNMTBwaXNodU5SZXVJTlFSSXdOQ2JCeFNiTGZkbldMSzNQNHRBUjh4Q2hoSkRGNGR4CjJNL1BDWWNRb3FHODZqcVU4dXdSa0xRSzhHT1hwUElNWHljZlhlajkwVk5TVnRwZ2dXSTdGNGRrc3ZYb21kd3IKNjFTS2lDdnNyTUFuVWpVM3VuMVBtZFhKbzQ2WndGTUNCSzBqY1drcnN0MmF5SlRoMFcwbEVkVzNMekp6QUVnNgpsbkdOYWgvZDAxVU9SUUNWdm1aYU9WSU5rS0pwdHBjSCs3cHUwWWpzVlhxa0psTHNKT2ZLS0k5Z1FpMDRkMEU0ClN1bU1sNTNpVzMzNXovalh0RGdlUDdTL0FnTUJBQUdqZ1kwd2dZb3dEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CMEcKQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjBHQTFVZApEZ1FXQkJURTl6cWx4dkErRXMrbE8zWlFEMlhubGFHRFpqQWZCZ05WSFNNRUdEQVdnQlJ4NEtjQVJjYWtSL2J4Cm13b1RCZURzK3hBb2FUQUxCZ05WSFJFRUJEQUNnZ0F3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUJnWHZwTEMKQjIybXlQaURlZnhsWGNZRzAvY0R2RXlYcTlENWtKTnBxKzFZQ0EvMlp2RDIyN1Q5VjY3aHVyTlA3T2FvSG95Tgo0MHpkR3lZTGRNV3pyZTQwVksxdC84N3pDTENzamt1ZXRCRWEwNVRqUTJhbDRhSzJ6TXl5MkJLWEpYbjlvdkhzCjJwNndvL001eklEOXl2OEhyRkZqWHM3NitTUTFzNXpOdUxuaDBET0Z1SktiZUZxSUJyNmZRbXlsb0l1VURtZjYKcGtQYkJyRnJpNHFGS0lDcVZKRCt3Z01zRFBiclVMZXF5NWlBVjNqRzJKMFgxOE4zdklCeUFwdWhZbjNudlV0TwpLREVIWkFJcFpjRWdqQ2ZLVDNyaERLL3JLN0VFZkxLcGlCdGJya3pFbjVWV3FQUFJEK3ZPU2VySldETDl1K0xyCmhEazlvZ084cmNqQzZGdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
  89. ---
  90. apiVersion: v1
  91. kind: Secret
  92. metadata:
  93. labels:
  94. k8s-app: kubernetes-dashboard
  95. name: dashboard-tls-cert
  96. namespace: kubernetes-dashboard
  97. type: Opaque
  98. data:
  99. tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUdYekNDQlVlZ0F3SUJBZ0lTQkdVcDlSaVAvK2lNMDVYM0FuY0FUeTg1TUEwR0NTcUdTSWIzRFFFQkN3VUEKTUVveEN6QUpCZ05WQkFZVEFsVlRNUll3RkFZRFZRUUtFdzFNWlhRbmN5QkZibU55ZVhCME1TTXdJUVlEVlFRRApFeHBNWlhRbmN5QkZibU55ZVhCMElFRjFkR2h2Y21sMGVTQllNekFlRncweE9UQTNNRGt3T1RJNU1ESmFGdzB4Ck9URXdNRGN3T1RJNU1ESmFNQll4RkRBU0JnTlZCQU1UQzIxa1pHZGhiV1V1WTI5dE1JSUNJakFOQmdrcWhraUcKOXcwQkFRRUZBQU9DQWc4QU1JSUNDZ0tDQWdFQW9mSVdOdTE4YUp1T3Jzd0JjZE9lODN0dWpXZ2dpUXl0VVYxQwpqNVhYbzNjQTM1L2ZxQXNGVHpJRGNwUmxhTGJ6SHd1d1psOWNSKzJuRENaUzI4VlhZaXcrSkQvQXpna3FzTHFJCjZ3YlFhcHNCa1lYUzRuT1UrZzhSMVgwcm52ckpickE1eHFJSWJKM002ajVLTXZ4RktvMEV3YXNBY2NiYlVGOW4KMHQ2RzNreG4zWW1Sek5HeHh1bXZ4V2prNWNkSWMza0MyT1VuRktGOG5XemJab2JiNk9PUnZSaElEWW5YdjkxdgoyMUYwQnZ0Q21GY0FEaDRqZXUrLzNKVDVLcEJkdkFHOHI3aU1wbkhKaFU1alhqTXlPRytMbkcvcnJuRzJGaXpHCmx1UHQwKzRlK0ZRSXFZY1BUM1cyTUF2ZDlzQTNEMThsUW82M00vZlMyYjNIYVNidFY0b1pmNS9zTzJNeEVPVnoKVEd1M0NxYk40TkcrZE8ycXoxYWxMQmlGZlVjNEdmUVpYRmlLaDFzazl3Qm5zeWhqYUZmdUx6bHRxMDg3STJLYQorVlRaUzFQSlJFbGduM3UwY1FmaENjelF5ZTJ3Vjl6RE9lVmUxeTBjLzZ0RWJhNllCeGR2ZGcwOFpKL0QwYTBLCnJvWlVJMW5Rc2RKeE8rQ3N1OURLYjROZzJCYnZkWVpHVWJrSCtSUDU0UUdrS1VnYnVxNVIwbXI0U1I2VUwrRE4KZjNxem81a3ZiMXVRWXFpaDZYUFVDVUVPOTNOU1Y2MTNUSUVOTUpyYjVhbGRLUkhPZlpWL201QThlUy9ibFFYcgpOV3FCRy9OL2RtckZjMmcyNGJEY3d5OXIzL3FkNy9MTWxmMVRVdzJGczR3M2x2VHJFanlwWEZhQ3BRRGxkc0xJCkYwcWVKVnNDQXdFQUFhT0NBbkV3Z2dKdE1BNEdBMVVkRHdFQi93UUVBd0lGb0RBZEJnTlZIU1VFRmpBVUJnZ3IKQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVVHUUNXOGNFbgpaNWhVWjBDa004QW03Wjh6NGJNd0h3WURWUjBqQkJnd0ZvQVVxRXBxWXdSOTNicm0wVG0zcGtWbDcvT283S0V3CmJ3WUlLd1lCQlFVSEFRRUVZekJoTUM0R0NDc0dBUVVGQnpBQmhpSm9kSFJ3T2k4dmIyTnpjQzVwYm5RdGVETXUKYkdWMGMyVnVZM0o1Y0hRdWIzSm5NQzhHQ0NzR0FRVUZCekFDaGlOb2RIUndPaTh2WTJWeWRDNXBiblF0ZURNdQpiR1YwYzJWdVkzSjVjSFF1YjNKbkx6QWxCZ05WSFJFRUhqQWNnZzBxTG0xa1pHZGhiV1V1WTI5dGdndHRaR1JuCllXMWxMbU52YlRCTUJnTlZIU0FFUlRCRE1BZ0dCbWVCREFFQ0FUQTNCZ3NyQmdFRUFZTGZFd0VCQVRBb01DWUcKQ0NzR0FRVUZCd0lCRmhwb2RIUndPaTh2WTNCekxte
GxkSE5sYm1OeWVYQjBMbTl5WnpDQ0FRWUdDaXNHQVFRQgoxbmtDQkFJRWdmY0VnZlFBOGdCM0FPSnBTNjRtNk9sQUNlaUdHN1k3ZzlRKzUvNTBpUHVranlpVEFaM2Q4ZHYrCkFBQUJhOVpIamZBQUFBUURBRWd3UmdJaEFKNXBWaDFDSEpmcTFhd2NOYmxEU2FwL1prQmVBeXU5ajcrTVhISnMKTEI3TUFpRUFwM2xLVVNCZXpiQWpodkZWSTBGR3ZFWmtzU2lYKyt3SitiZ3VLOXlaS3JBQWR3QXBQRkdXVk1nNQpaYnFxVVB4WUI5UzNiNzlZZWlseTNLVEREUFRsUlVmMGVBQUFBV3ZXUjQzd0FBQUVBd0JJTUVZQ0lRRDI1L1NHClcrWHRDa2VzaHViekZtUnRnaDUrWXMxaXpnSG5CSmtOS1Z0cE9nSWhBT1lteWJCWjV3RjZBeE5UT29WdnkyYVMKNktEdURyWmRzSVYrN251WkhFSDdNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUNjRHFwTzF3OWdNbzJGaW1GTgpwSUlxT3d1N2hsUWVURU44enY1UmFiYWtGelpvZlhURXpRcGNtSlNWRUhET25MVGpjaWpITWxtbGdIbndTM2w0CjAyWFB0akIzUWJUNFRWUHlqUGpBZ1ZvL1ZmclNJT2N5S1pKRDNJMWxLNXV1anRCdGF3Rnh3cjBYeGd1Q2k5TlUKdlQ2R0RxYnlaVVdiL1I0bXVVYzFwRzMySVJiS3BxQnZveitsaGRMNHdOb1M5YXdiUlg3LzBmUytEZUZiZ09vbgpzYnBDYTFQeFdqWHYwNloxNkF0LzBRTlVZLzExdEw4bTRDK3Q2OW5kOUt6eUdRZmdOank2NmM1RmhIODVBQkNFClJ6L3NoVkdyb1lTQkh3M1Q0c0NKZnh4dW5oK0tVZ0dvRFk5VUc5RzI2T200eHgvWFQ5OTZONTNxUytPS21iY0wKajVJMgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJRWtqQ0NBM3FnQXdJQkFnSVFDZ0ZCUWdBQUFWT0ZjMm9MaGV5bkNEQU5CZ2txaGtpRzl3MEJBUXNGQURBLwpNU1F3SWdZRFZRUUtFeHRFYVdkcGRHRnNJRk5wWjI1aGRIVnlaU0JVY25WemRDQkRieTR4RnpBVkJnTlZCQU1UCkRrUlRWQ0JTYjI5MElFTkJJRmd6TUI0WERURTJNRE14TnpFMk5EQTBObG9YRFRJeE1ETXhOekUyTkRBME5sb3cKU2pFTE1Ba0dBMVVFQmhNQ1ZWTXhGakFVQmdOVkJBb1REVXhsZENkeklFVnVZM0o1Y0hReEl6QWhCZ05WQkFNVApHa3hsZENkeklFVnVZM0o1Y0hRZ1FYVjBhRzl5YVhSNUlGZ3pNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUFuTk1NOEZybExrZTNjbDAzZzdOb1l6RHExelVtR1NYaHZiNDE4WENTTDdlNFMwRUYKcTZtZU5RaFk3TEVxeEdpSEM2UGpkZVRtODZkaWNicDVnV0FmMTVHYW4vUFFlR2R4eUdrT2xaSFAvdWFaNldBOApTTXgreWsxM0VpU2RSeHRhNjduc0hqY0FISnlzZTZjRjZzNUs2NzFCNVRhWXVjdjliVHlXYU44aktrS1FESVowClo4aC9wWnE0VW1FVUV6OWw2WUtIeTl2NkRsYjJob256aFQrWGhxK3czQnJ2YXcyVkZuM0VLNkJsc3BrRU5uV0EKYTZ4Szh4dVFTWGd2b3BaUEtpQWxLUVRHZE1EUU1jMlBNVGlWRnJxb003aEQ4YkVmd3pCL29ua3hFejB0TnZqagovUEl6YXJrNU1jV3Z4STBOSFdRV002cjZoQ20yMUF2QTJIM0Rrd0lEQ
VFBQm80SUJmVENDQVhrd0VnWURWUjBUCkFRSC9CQWd3QmdFQi93SUJBREFPQmdOVkhROEJBZjhFQkFNQ0FZWXdmd1lJS3dZQkJRVUhBUUVFY3pCeE1ESUcKQ0NzR0FRVUZCekFCaGlab2RIUndPaTh2YVhOeVp5NTBjblZ6ZEdsa0xtOWpjM0F1YVdSbGJuUnlkWE4wTG1OdgpiVEE3QmdnckJnRUZCUWN3QW9ZdmFIUjBjRG92TDJGd2NITXVhV1JsYm5SeWRYTjBMbU52YlM5eWIyOTBjeTlrCmMzUnliMjkwWTJGNE15NXdOMk13SHdZRFZSMGpCQmd3Rm9BVXhLZXhwSHNzY2ZyYjRVdVFkZi9FRldDRmlSQXcKVkFZRFZSMGdCRTB3U3pBSUJnWm5nUXdCQWdFd1B3WUxLd1lCQkFHQzN4TUJBUUV3TURBdUJnZ3JCZ0VGQlFjQwpBUllpYUhSMGNEb3ZMMk53Y3k1eWIyOTBMWGd4TG14bGRITmxibU55ZVhCMExtOXlaekE4QmdOVkhSOEVOVEF6Ck1ER2dMNkF0aGl0b2RIUndPaTh2WTNKc0xtbGtaVzUwY25WemRDNWpiMjB2UkZOVVVrOVBWRU5CV0RORFVrd3UKWTNKc01CMEdBMVVkRGdRV0JCU29TbXBqQkgzZHV1YlJPYmVtUldYdjg2anNvVEFOQmdrcWhraUc5dzBCQVFzRgpBQU9DQVFFQTNUUFhFZk5qV0RqZEdCWDdDVlcrZGxhNWNFaWxhVWNuZThJa0NKTHhXaDlLRWlrM0pIUlJIR0pvCnVNMlZjR2ZsOTZTOFRpaFJ6WnZvcm9lZDZ0aTZXcUVCbXR6dzNXb2RhdGcrVnlPZXBoNEVZcHIvMXdYS3R4OC8Kd0FwSXZKU3d0bVZpNE1GVTVhTXFyU0RFNmVhNzNNajJ0Y015bzVqTWQ2am1lV1VISzhzby9qb1dVb0hPVWd3dQpYNFBvMVFZeiszZHN6a0RxTXA0ZmtseEJ3WFJzVzEwS1h6UE1UWitzT1BBdmV5eGluZG1qa1c4bEd5K1FzUmxHClBmWitHNlo2aDdtamVtMFkraVdsa1ljVjRQSVdMMWl3Qmk4c2FDYkdTNWpOMnA4TStYK1E3VU5LRWtST2IzTjYKS09xa3FtNTdUSDJIM2VESkFrU25oNi9ETkZ1MFFnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
  100. tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBb2ZJV051MThhSnVPcnN3QmNkT2U4M3R1aldnZ2lReXRVVjFDajVYWG8zY0EzNS9mCnFBc0ZUeklEY3BSbGFMYnpId3V3Wmw5Y1IrMm5EQ1pTMjhWWFlpdytKRC9BemdrcXNMcUk2d2JRYXBzQmtZWFMKNG5PVStnOFIxWDBybnZySmJyQTV4cUlJYkozTTZqNUtNdnhGS28wRXdhc0FjY2JiVUY5bjB0Nkcza3huM1ltUgp6Tkd4eHVtdnhXams1Y2RJYzNrQzJPVW5GS0Y4bld6YlpvYmI2T09SdlJoSURZblh2OTF2MjFGMEJ2dENtRmNBCkRoNGpldSsvM0pUNUtwQmR2QUc4cjdpTXBuSEpoVTVqWGpNeU9HK0xuRy9ycm5HMkZpekdsdVB0MCs0ZStGUUkKcVljUFQzVzJNQXZkOXNBM0QxOGxRbzYzTS9mUzJiM0hhU2J0VjRvWmY1L3NPMk14RU9WelRHdTNDcWJONE5HKwpkTzJxejFhbExCaUZmVWM0R2ZRWlhGaUtoMXNrOXdCbnN5aGphRmZ1THpsdHEwODdJMkthK1ZUWlMxUEpSRWxnCm4zdTBjUWZoQ2N6UXllMndWOXpET2VWZTF5MGMvNnRFYmE2WUJ4ZHZkZzA4WkovRDBhMEtyb1pVSTFuUXNkSngKTytDc3U5REtiNE5nMkJidmRZWkdVYmtIK1JQNTRRR2tLVWdidXE1UjBtcjRTUjZVTCtETmYzcXpvNWt2YjF1UQpZcWloNlhQVUNVRU85M05TVjYxM1RJRU5NSnJiNWFsZEtSSE9mWlYvbTVBOGVTL2JsUVhyTldxQkcvTi9kbXJGCmMyZzI0YkRjd3k5cjMvcWQ3L0xNbGYxVFV3MkZzNHczbHZUckVqeXBYRmFDcFFEbGRzTElGMHFlSlZzQ0F3RUEKQVFLQ0FnQXY5Zk13UnpzTisrdlF4cWd5M3JwM1gzbkpOU3BWakVTVUVTdVNQSTFGWXd3R0xtSGRjWTRiK3pMYwpMeWl0VDJsSEszNE5nM1pmOHZrQzl5S1k1YVBRZGt2ZERtaDZYR3FoTmswd1ZhOUpzeWhPd2JSSHpuVXpiVjBaCnZkMDZVd2x1MTQvMHpLMzBCUFBYOTZTZjN1aFpCclIrNnJiUisxT2VSUE1KbDArWDdFYmliRWlhd1F1R1hsVHAKQVB5eE5FaTNzZ0h1M0VhcnJIdXNYNzNHYW5BY1U3RW9zRlUrZFRGSktEcGxXSVVsUUNwajFYZzF0aVZKMWxFYQo4Wit0UkY0T1BQRjFsUkZLaGU1cHBXSjJWbkVzRjVUZ09xRXc0NHBLbk80Zlo5ZGFhVzRRbTBxSmNtOU5XQTRoCndwSDA3czRmcGt6eG5qU1JsbmFDZDlyandGeVBsSkJzUXNhVlFFNzlpQzJZMTRnTk9KQ0xyMXRKSEQ2ODN3bW4KS3ZNOHZpOTdHTmIybXZHeWNtZnloNVpzTFBpTWNqOFFER3VWZU53dlNESXpybnhqVkZlc0liTWt5UlZRem9IVApTTHRQbXdVR3lwRHVrMDhaZytsT0lYOC85K3lqMER3MDRqenllTVptYlFVdkd2N2lNWjFUaHdaRHF1YkJXV3J4CmtYTmJwTG9BMGxrcHh4bjdGam9Ya20zM2ZKQURjd2xWSS82WFNrSm1FaFVlZmZnaFFSMGNyVGphQVd1Qkx2Qk0KT0s5aEEzT3RTN2F0S2FDb1lvSmRrYkpHQTdWdytNNzA4NEJOTGhxM1Fyckg4S3M3Z05pdC9NN3lxSnU1alBaZgo2SE1seHNyWU9NVUhuVlk4VDkwN0Q3cS9ORUNnRThzODhnZzAyQ3JNWTFqanE4UnBpUUtDQVFFQTE2UHJaMUEwClNISS83akdmS3BETkJzQ0xrVUFxRERKSzQ0
dFdJYmJBUXFhRTN1eDh3bkFlU2NjSHozbS9ScEpPSGtteHZTZlgKbTJ1Wk8veGtNTWhYK2lwOHdFOHZibzR1enVNYitTSXE3bWpialJkK1JJczJ5NHJsZVQ2NGVjRWc4R2pZckExZgpiSEI0MmhQclVTcXpxUVIwOTZocm1Lb1diU0RDZDZwOUVNeWVzT3IwTjdtQmJYVVZPazJxZGtYRlZWbHBlUDdpClFxWGdRUUI0bHgzLzJJdlpBMlhJUXlQdGJ0RWVRbmgyQ3FNM2NDMzR0VEVjZ244K0VwNG9SWmkwTTBHaUY3bXgKOTEvZHY2THZlNTR5K1pON1lXd1NFQ09ubzd5bDlvTlBZVnVGMGRiMjh0elppMThCeHJTQ2JESE1XbExvUzhWNgpXTEo0OGlSODJDYkc1d0tDQVFFQXdFRjM4KzYyeDhDU2x0blZZNlJaN0J0NEdiNEJqVWhWYXZ0NFkxUGFlbXFNCjFidFVnR2JyUnBoNHFUSEFTckUwUUZLeVZKYnlCUkJyRHIxWHU4WWRSVXQzZC92VzlIR1dPd1BKdTN2M3pLbHMKQ2xsZnpFY3J5L1l2aHAzSzlEcGR6OE1icHdueW5xcGV6b0xMNlJpL3JnK0hyTzBueXd1RSt0T2xYVFo2eUtadApHWVdTSVBWaG00NUJkc2ZxUzhnYjVvbjA0bHh3bnhxVnJvN0c0TUR6cmVEYlFhaGdyS3VuRWxwajZ4eW1PVWpBCkdCZDR3QUVrUExxNUUrRWcreDY4TkRLVTYwK29ybFhLWVhDQm5HSFZOQ3BVcmswVXkrcHFZZmFEN3VuR2VzaHMKSEwra3lXbXl5a3ErTmNKbnRXMFNSNy9sU1IvZUFhVEZyVzZVaXV0RGJRS0NBUUVBemhRYU9PNmVPSW51N016QgpScVdCT3EyeDg4cjFKQmpBRnZzbkFpc3JTOGJsZmtGVTdXREdvVTB5K3FWb0ZhSm1RMjI4RFlCUS9YZnp4aTdxCjlPL1JuQU1VbTVoUlJQOWVYbHNPZGFXZ2o1em9ETXRoNFZHRnVUbHhHZERGN1oyU3hBMysyMVlnVm5xYUZCY3IKTUxOMVpOWWNqajJITGl1R0tSNUFtcW4wd2FRN0YrcENJQ3NKTkxqSzQ2QXJnc0lrMXU4TzdCSHgyeTI0eFlZVQp1SjV6emRmQU9nNEFONkhURzY5L2twaWFmb29DeGhNNDlyZ0xmZTdxUEZLbk8vTzJhckdUbmNiWi9BWEMzb3h3Ci81dHRMYlF6R2lSMGtyWHdWSHRKdys4elltQmIzL0RtcWF4RHZueTZMdEo5UGJiTmk1aGw1VnZCRTVqa0dzeWgKL3RQNEN3S0NBUUJ2R1dZb0lKcWZkRGxCMHovdEJOeXlCRzJ5OG9vVEN1blJtT0JKQmZ3TEllZWcyMUJKb3kveQo2OGxPZk9HU1NEVFp0dkEyMGNPcUNZTFVVYmFSWERzdUFCNVp4NzdBSTZPZEZ1Tk01S2FlTG9td3NWVWF4MFlYCjUzd3ZYcUFaNG1DejN4dnJ1MlBwTEtyOHk3anFTdEw1MHgra1hxZlFQaWZxaXNQVXlkYktmT0l2RFhFVWVyaWQKRytmWXJFNUkzS3JDM3BZVStUWmJ1eEVrZm4yUEEvSE5XVk5hN2VKdjVnSDJLU1gwaCtuRzBMT3hPRjhmRlluTApUbHdGa09OdU9xU254Vk1wYUM4aUQ1R1VIVi9JN3dBMTFRQjZlVEM3Wmd0ejhQRHM3MHN6U1A2dzNrNXIxaGpyCnJhV2RpMnBDL1hUQzRiR3VRQ3dhNXcwVTNBSWJCVGxCQW9JQkFEc1RONGhvclVHNWw3MXhLZk5ibVBTbDZ6RlIKYTJ4d2U2VVZPOVZzMFpHeEdLWWJSN1VuVDBDL1FqUiswS2JsbE9leDdFY3cyMklCcmFFVzBGbXpuVnoyUW9FNwpMUE5COXhyTTFEeE56UjZEbFBUeERMcEFGWVlUcm40
SWY1cjFVdVdpc2lMdmd6T2xGTlVITnN5UFJIZWNGblhUCnNhTk9JWkgrQTJ5KzF3QWdpSFZIS2JPRGRHeVFQVlQ0TXFFWkJaY2pQcmRBekNKcnloSHlYdHBqRjFSdlFEYTMKTVM3U3JVTGM4djJGQWJ1VG1QZ2R1ZHBKd1Q4dENCa2VRKzZ4YmJWN3YrZzBEMG5EWFNIZFVwNXFyUzcrTnhtVwp4NWV4UHo1VENhYXcxSnkzWjRmT1MzMTV6eHJGdmRHTmhWRXhMMzRlUVlzOHRYN0N0VWxuWkNray9zYz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0=
  101. ---
  102. apiVersion: v1
  103. kind: Secret
  104. metadata:
  105. labels:
  106. k8s-app: kubernetes-dashboard
  107. name: kubernetes-dashboard-csrf
  108. namespace: kubernetes-dashboard
  109. type: Opaque
  110. data:
  111. csrf: ""
  112. ---
  113. apiVersion: v1
  114. kind: Secret
  115. metadata:
  116. labels:
  117. k8s-app: kubernetes-dashboard
  118. name: kubernetes-dashboard-key-holder
  119. namespace: kubernetes-dashboard
  120. type: Opaque
  121. ---
  122. kind: ConfigMap
  123. apiVersion: v1
  124. metadata:
  125. labels:
  126. k8s-app: kubernetes-dashboard
  127. name: kubernetes-dashboard-settings
  128. namespace: kubernetes-dashboard
  129. ---
  130. kind: Role
  131. apiVersion: rbac.authorization.k8s.io/v1
  132. metadata:
  133. labels:
  134. k8s-app: kubernetes-dashboard
  135. name: kubernetes-dashboard
  136. namespace: kubernetes-dashboard
  137. rules:
  138. # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  139. - apiGroups: [""]
  140. resources: ["secrets"]
  141. resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
  142. verbs: ["get", "update", "delete"]
  143. # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  144. - apiGroups: [""]
  145. resources: ["configmaps"]
  146. resourceNames: ["kubernetes-dashboard-settings"]
  147. verbs: ["get", "update"]
  148. # Allow Dashboard to get metrics.
  149. - apiGroups: [""]
  150. resources: ["services"]
  151. resourceNames: ["heapster", "dashboard-metrics-scraper"]
  152. verbs: ["proxy"]
  153. - apiGroups: [""]
  154. resources: ["services/proxy"]
  155. resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
  156. verbs: ["get"]
  157. ---
  158. kind: ClusterRole
  159. apiVersion: rbac.authorization.k8s.io/v1
  160. metadata:
  161. labels:
  162. k8s-app: kubernetes-dashboard
  163. name: kubernetes-dashboard
  164. rules:
  165. # Allow Metrics Scraper to get metrics from the Metrics server
  166. - apiGroups: ["metrics.k8s.io"]
  167. resources: ["pods", "nodes"]
  168. verbs: ["get", "list", "watch"]
  169. ---
  170. apiVersion: rbac.authorization.k8s.io/v1
  171. kind: RoleBinding
  172. metadata:
  173. labels:
  174. k8s-app: kubernetes-dashboard
  175. name: kubernetes-dashboard
  176. namespace: kubernetes-dashboard
  177. roleRef:
  178. apiGroup: rbac.authorization.k8s.io
  179. kind: Role
  180. name: kubernetes-dashboard
  181. subjects:
  182. - kind: ServiceAccount
  183. name: kubernetes-dashboard
  184. namespace: kubernetes-dashboard
  185. ---
  186. apiVersion: rbac.authorization.k8s.io/v1
  187. kind: ClusterRoleBinding
  188. metadata:
  189. name: kubernetes-dashboard
  190. namespace: kubernetes-dashboard
  191. roleRef:
  192. apiGroup: rbac.authorization.k8s.io
  193. kind: ClusterRole
  194. name: kubernetes-dashboard
  195. subjects:
  196. - kind: ServiceAccount
  197. name: kubernetes-dashboard
  198. namespace: kubernetes-dashboard
  199. ---
  200. kind: Deployment
  201. apiVersion: apps/v1
  202. metadata:
  203. labels:
  204. k8s-app: kubernetes-dashboard
  205. name: kubernetes-dashboard
  206. namespace: kubernetes-dashboard
  207. spec:
  208. replicas: 1
  209. revisionHistoryLimit: 10
  210. selector:
  211. matchLabels:
  212. k8s-app: kubernetes-dashboard
  213. template:
  214. metadata:
  215. labels:
  216. k8s-app: kubernetes-dashboard
  217. spec:
  218. containers:
  219. - name: kubernetes-dashboard
  220. image: kubernetesui/dashboard:v2.0.0-beta4
  221. imagePullPolicy: Always
  222. ports:
  223. - containerPort: 8443
  224. protocol: TCP
  225. args:
  226. - --auto-generate-certificates
  227. - --namespace=kubernetes-dashboard
  228. - --token-ttl=43200
  229. # Uncomment the following line to manually specify Kubernetes API server Host
  230. # If not specified, Dashboard will attempt to auto discover the API server and connect
  231. # to it. Uncomment only if the default does not work.
  232. # - --apiserver-host=http://my-address:port
  233. volumeMounts:
  234. - name: kubernetes-dashboard-certs
  235. mountPath: /certs
  236. # Create on-disk volume to store exec logs
  237. - mountPath: /tmp
  238. name: tmp-volume
  239. livenessProbe:
  240. httpGet:
  241. scheme: HTTPS
  242. path: /
  243. port: 8443
  244. initialDelaySeconds: 30
  245. timeoutSeconds: 30
  246. volumes:
  247. - name: kubernetes-dashboard-certs
  248. secret:
  249. secretName: kubernetes-dashboard-certs
  250. - name: tmp-volume
  251. emptyDir: {}
  252. serviceAccountName: kubernetes-dashboard
  253. # Comment the following tolerations if Dashboard must not be deployed on master
  254. tolerations:
  255. - key: node-role.kubernetes.io/master
  256. effect: NoSchedule
  257. ---
  258. kind: Service
  259. apiVersion: v1
  260. metadata:
  261. labels:
  262. k8s-app: dashboard-metrics-scraper
  263. name: dashboard-metrics-scraper
  264. namespace: kubernetes-dashboard
  265. spec:
  266. ports:
  267. - port: 8000
  268. targetPort: 8000
  269. selector:
  270. k8s-app: dashboard-metrics-scraper
  271. ---
  272. kind: Deployment
  273. apiVersion: apps/v1
  274. metadata:
  275. labels:
  276. k8s-app: dashboard-metrics-scraper
  277. name: dashboard-metrics-scraper
  278. namespace: kubernetes-dashboard
  279. spec:
  280. replicas: 1
  281. revisionHistoryLimit: 10
  282. selector:
  283. matchLabels:
  284. k8s-app: dashboard-metrics-scraper
  285. template:
  286. metadata:
  287. labels:
  288. k8s-app: dashboard-metrics-scraper
  289. spec:
  290. containers:
  291. - name: dashboard-metrics-scraper
  292. image: kubernetesui/metrics-scraper:v1.0.1
  293. ports:
  294. - containerPort: 8000
  295. protocol: TCP
  296. livenessProbe:
  297. httpGet:
  298. scheme: HTTP
  299. path: /
  300. port: 8000
  301. initialDelaySeconds: 30
  302. timeoutSeconds: 30
  303. volumeMounts:
  304. - mountPath: /tmp
  305. name: tmp-volume
  306. serviceAccountName: kubernetes-dashboard
  307. # Comment the following tolerations if Dashboard must not be deployed on master
  308. tolerations:
  309. - key: node-role.kubernetes.io/master
  310. effect: NoSchedule
  311. volumes:
  312. - name: tmp-volume
  313. emptyDir: {}
  314. ---
  315. apiVersion: extensions/v1beta1
  316. kind: Ingress
  317. metadata:
  318. labels:
  319. k8s-app: kubernetes-dashboard
  320. name: kubernetes-dashboard
  321. namespace: kubernetes-dashboard
  322. annotations:
  323. kubernetes.io/ingress.class: traefik
  324. traefik.ingress.kubernetes.io/frontend-entry-points: http,https
  325. traefik.ingress.kubernetes.io/redirect-entry-point: https
  326. spec:
  327. rules:
  328. - host: csdd.xxxx.com
  329. http:
  330. paths:
  331. - backend:
  332. serviceName: kubernetes-dashboard
  333. servicePort: 443
  334. tls:
  335. - secretName: dashboard-tls-cert
  336. # 创建kubernetes-dashboard token 登录
  337. # 生成token
  338. kubectl create sa dashboard-admin -n kube-system
  339. # 授权token 访问权限
  340. kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
  341. # 获取token
  342. ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
  343. # 获取dashboard.kubeconfig 使用token 值
  344. DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
  345. echo ${DASHBOARD_LOGIN_TOKEN}
  346. # 设置集群参数
  347. kubectl config set-cluster kubernetes \
  348. --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  349. --embed-certs=true \
  350. --server=${KUBE_APISERVER} \
  351. --kubeconfig=dashboard.kubeconfig
  352. # 设置客户端认证参数,使用上面创建的 Token
  353. kubectl config set-credentials dashboard_user \
  354. --token=${DASHBOARD_LOGIN_TOKEN} \
  355. --kubeconfig=dashboard.kubeconfig
  356. # 设置上下文参数
  357. kubectl config set-context default \
  358. --cluster=kubernetes \
  359. --user=dashboard_user \
  360. --kubeconfig=dashboard.kubeconfig
  361. # 设置默认上下文
  362. kubectl config use-context default --kubeconfig=dashboard.kubeconfig
  363. # 绑定hosts
  364. https://csdd.xxxx.com/#/overview?namespace=default
  365. # kubernetes-dashboard 使用metrics 显示cpu内存资源 所有要部署metrics-server

metrics-server 部署

  1. # win on Ubuntu 操作
  2. #创建metrics-server 证书
  3. cat << EOF | tee /apps/work/k8s/cfssl/k8s/metrics-server.json
  4. {
  5. "CN": "metrics-server",
  6. "key": {
  7. "algo": "rsa",
  8. "size": 2048
  9. },
  10. "names": [
  11. {
  12. "C": "CN",
  13. "ST": "GuangDong",
  14. "L": "GuangZhou",
  15. "O": "cluster",
  16. "OU": "cluster"
  17. }
  18. ]
  19. }
  20. EOF
  21. ### 生成证书
  22. cfssl gencert -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
  23. -config=/apps/work/k8s/cfssl/ca-config.json \
  24. -profile=kubernetes /apps/work/k8s/cfssl/k8s/metrics-server.json | cfssljson -bare ./metrics-server
  25. # 创建metrics-server-secrets.yaml
  26. # base64 加密
  27. cat metrics-server.pem|base64 | tr -d '\n'
  28. cat metrics-server-key.pem|base64 | tr -d '\n'
  29. vi metrics-server-secrets.yaml
  30. apiVersion: v1
  31. kind: Secret
  32. metadata:
  33. labels:
  34. k8s-app: metrics-server
  35. name: metrics-server-certs
  36. namespace: kube-system
  37. type: Opaque
  38. data:
  39. metrics-server.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ3VENDQXRXZ0F3SUJBZ0lVYkloOTQ3Z3NET2gxdVplVnBoMU9GVmhYeHA0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2JqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUk13CkVRWURWUVFERXdwcmRXSmxjbTVsZEdWek1CNFhEVEU1TURjd05UQXpOVGd3TUZvWERUSTVNRGN3TWpBek5UZ3cKTUZvd2NqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUmN3CkZRWURWUVFERXc1dFpYUnlhV056TFhObGNuWmxjakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTUlvdHhsakpLcCtyb1hGcGJJWXRuNW1mVXVyREM1bUlkb2Z0RTNSVGhNU1pPSCt0aVVoMDdQRQpnb0xPOG1lSkxaS21ob1BUYzhJWTJYVjdiZzJWWFpRVUd6MFJuMExmNWdWam5UN29yMFFnZzErYnJnZU5wWUtpCjVoNm5ISVE3ZGlKYk10cUFndm16RGR6bWdoUXd2SHBKTzh3bEQwKzRwT0VHT2VtQkNPU3BsaFhrenR3UWQ3ZHYKY2x1QUljQUdiUGF6dzI4VkJJU2F4bCtrZnZwNzIyeEkvVy9DL3pRS1JnN053UG9IaVpFWm9QcGxPY001cGpvUwpJeEdnWVZEYjB6OGlqZWR3RjZmcE9RZkFOcitvQnVONnZnMXAzd2Jud2tKTWtEQUV2MzBXZG1BUzB5STJicC9RCkJZYjU2VWxGTXI4anNoWHJ5dlVsZ3F3S0hscFh0WkVDQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2cKTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwRwpBMVVkRGdRV0JCVEp6cVJBMWdIN***d3B3TG01ZEtWUHFvdG43VEFmQmdOVkhTTUVHREFXZ0JSeDRLY0FSY2FrClIvYnhtd29UQmVEcyt4QW9hVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBcTgyS0JRMWJBM1pjeG9OZGFJVVUKNjVvSkZhM3paM2k5SDk1QWZvU0dXQWQvbFU3L3B2Qkt2K0EwbS9qQ0tMS1FpQ0xXRGtHYzFUNS8xTzhPYTg0YgpDV3I5M3pOWldRb2N6UWJEaHFCRnZNUVp0azRYVVkrMjR3Yjd3cmJCRHc2QVY2R0l2bVMrYm91eFRVd29sbmRMCk5FS2EvcHNvQUtaRUFJZkJUaCtMNVpMQ09GOXFUWEMyOGtnN1czak4vMzBiYlk5UE5ObVpLcGNaNEpEVjA5aGYKU3RaTjZuOVFXK3ZDcFFoZXVISWVORlR2RnQ5bGtSMVBFYUtHUjFiWEdyeUNHOHNTeXVDc0xER1lnVlhmYVZtYgp3dTlnSG1JS2E2aDZWVmVIWitMbVFmZmxqcEdPRStKV1l1TWRPamtHYUYxdEtLUWZFelZGN3BxT0VTQXkrV3hpCnVBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  40. metrics-server-key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBd2lpM0dXTWtxbjZ1aGNXbHNoaTJmbVo5UzZzTUxtWWgyaCswVGRGT0V4Sms0ZjYyCkpTSFRzOFNDZ3M3eVo0a3RrcWFHZzlOendoalpkWHR1RFpWZGxCUWJQUkdmUXQvbUJXT2RQdWl2UkNDRFg1dXUKQjQybGdxTG1IcWNjaER0Mklsc3kyb0NDK2JNTjNPYUNGREM4ZWtrN3pDVVBUN2lrNFFZNTZZRUk1S21XRmVUTwozQkIzdDI5eVc0QWh3QVpzOXJQRGJ4VUVoSnJHWDZSKytudmJiRWo5YjhML05BcEdEczNBK2dlSmtSbWcrbVU1Cnd6bW1PaElqRWFCaFVOdlRQeUtONTNBWHArazVCOEEydjZnRzQzcStEV25mQnVmQ1FreVFNQVMvZlJaMllCTFQKSWpadW45QUZodm5wU1VVeXZ5T3lGZXZLOVNXQ3JBb2VXbGUxa1FJREFRQUJBb0lCQUQzaEtoSGVSalpNYjZUVQp6QzFKc1FmeDlDYmttZHBEMUxBbkFKajRoekNPNFRZaHJyMkEzVzdpeDFHWFVTeHNUT2o3UjgzRjI1UFZ4YUNxCnVQVjlVRGk4ZTczbjJ1RSthSm41R0ltUE1TUytWQUJwcG5wank0Y3FFYnRkT1RwdmxRUDRHdW9Hb1RlaHVGNVoKM01WQWp5Rk9LOCt4VkFMdGJ5Y0VpL3ArbEc0RGkvOThIcUlDQngwSlhCUnJoV05lWUdZL0c3eGNWT2pCNUl5SQpPNVpoZ1I0Sk9yODloNVZ1RHdIY3E2UVlLQ2sxTktQZzc0Y3BOY2J5ZVZEM0FhYVRHd25QU3BMd0hGaElzTGpNCkllaEJqZzkrZDdyRU8xMHU0azhKWW5qYUdNMzRMM0RlTVVrck95NUMzRjY3RFNwaTJVZUhjOVh6YzViVUFwb0gKTE1zRUxuMENnWUVBenV3STRSN0FrYmZ2VnVFR0hOa0FFNjgzdTJoWk1iUFdwSHVVTGQ5UzJnUzYzODVWUFdJSApiQXp6WGZuTjh1U2dHNVVQdzUxa1VLZVppY0pORTVBWWtOVDRwMWxoTFdKSkwxSWRSdEV3VU5oblVLNlczRWlMCmJLeDhhalk3dkZDV0ZKUmRTUHJYLzViTWU4TVBCWWNTT0FkZEErZFhlaGd0K2x0WEU3LzE1cGNDZ1lFQThEVzcKOEIyZGNYLzZqMVF4UkhEc3ZGUDYxaWxTREJhMEtKUGlobnpSZWVwVHRFc0hvRitBODk2SXRDZFFMSTJZcEZyRApBU1dSSU1VQVVzVE1uMStvZFprOGovd21QRkxzUmRpSVJWZC80ZHdCTmlDNHJxdnkwQTFxUVNJUXF1MC9CcFV2ClRpMjhZeURRdHh0Wmg1d0NDQUx2a0Rqb2N4cXJzbHEwRDViTnNoY0NnWUFFNjB1U3ZuNXBlR3Z5THN0TWFFUSsKTTQ0cG9HTnVjUTRMUHZ6WTBsUlA1b2ppeFM3MWxKMEVqcEpDaTZ1L3VIdXQ3QzlCd1Q0ZlJMeTdyNmpSYkQySgpRK2JkWTV5UnphSmJ3Nkg2aXdLUkNYUDdVUXM1Rldockh2YWVOOGZYeERxdEpwSEpLRjEyTUFtUWI2U3R4dlpCCjZycmxXdHlUaEh1alZnU043YVJVNVFLQmdHbVFJN3lkTnpERy9sVDR1Z0lLNG03Tk5VSGl2TlRsTVYxWHlFZzAKR0ZiTW5PWnh4ck02NVUvRzd5ckUwQjRVU0EyS2VZSktnU0gya1hMT1crSjZSbTBQMzZhak9DWndocmNYTnFQSwpsVCtyMExoNTNzK2NiMFB4Y1UyWWE5ekNFRjJUT0V2U0c2VXdxYWllazFUZVFhSkZzQVFnamo3dmJKOGY3MXVlCmVWMFhBb0dCQUlQWDN1OT
JtU3o5clpqOTN3NFNKbUk2K0tkY3BtTGg0TE5IcUdBOVJSNi80T0NWWDBHNHVtZ1YKMkxUOU1MY05CdUVFZE5nTHRHeGIzdTI5cUlLZHZpcDhBZzlTbHQvMlBIcnlsLzIzWWU0bURTWDVUOXNrVXhaRgpjVGxvN3QxSnRZN3NaRU00Vng4cWxQbDcwWXMvSWRuWmhWaFU2d1F2ZGp0TGk1UlU4L2ttCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
  41. # resource-reader.yaml
  42. vi resource-reader.yaml
  43. ---
  44. apiVersion: rbac.authorization.k8s.io/v1
  45. kind: ClusterRole
  46. metadata:
  47. name: system:metrics-server
  48. rules:
  49. - apiGroups:
  50. - ""
  51. resources:
  52. - pods
  53. - nodes
  54. - nodes/stats
  55. verbs:
  56. - get
  57. - list
  58. - watch
  59. ---
  60. apiVersion: rbac.authorization.k8s.io/v1
  61. kind: ClusterRoleBinding
  62. metadata:
  63. name: system:metrics-server
  64. roleRef:
  65. apiGroup: rbac.authorization.k8s.io
  66. kind: ClusterRole
  67. name: system:metrics-server
  68. subjects:
  69. - kind: ServiceAccount
  70. name: metrics-server
  71. namespace: kube-system
  72. # metrics-server-service.yaml
  73. vi metrics-server-service.yaml
  74. ---
  75. apiVersion: v1
  76. kind: Service
  77. metadata:
  78. name: metrics-server
  79. namespace: kube-system
  80. labels:
  81. kubernetes.io/name: "Metrics-server"
  82. spec:
  83. selector:
  84. k8s-app: metrics-server
  85. ports:
  86. - port: 443
  87. protocol: TCP
  88. targetPort: 443
  89. # metrics-apiservice.yaml
  90. vi metrics-apiservice.yaml
  91. ---
  92. apiVersion: apiregistration.k8s.io/v1beta1
  93. kind: APIService
  94. metadata:
  95. name: v1beta1.metrics.k8s.io
  96. spec:
  97. service:
  98. name: metrics-server
  99. namespace: kube-system
  100. group: metrics.k8s.io
  101. version: v1beta1
  102. insecureSkipTLSVerify: true
  103. groupPriorityMinimum: 100
  104. versionPriority: 100
  105. # auth-reader.yaml
  106. vi auth-reader.yaml
  107. ---
  108. apiVersion: rbac.authorization.k8s.io/v1beta1
  109. kind: RoleBinding
  110. metadata:
  111. name: metrics-server-auth-reader
  112. namespace: kube-system
  113. roleRef:
  114. apiGroup: rbac.authorization.k8s.io
  115. kind: Role
  116. name: extension-apiserver-authentication-reader
  117. subjects:
  118. - kind: ServiceAccount
  119. name: metrics-server
  120. namespace: kube-system
  121. # auth-delegator.yaml
  122. vi auth-delegator.yaml
  123. ---
  124. apiVersion: rbac.authorization.k8s.io/v1beta1
  125. kind: ClusterRoleBinding
  126. metadata:
  127. name: metrics-server:system:auth-delegator
  128. roleRef:
  129. apiGroup: rbac.authorization.k8s.io
  130. kind: ClusterRole
  131. name: system:auth-delegator
  132. subjects:
  133. - kind: ServiceAccount
  134. name: metrics-server
  135. namespace: kube-system
  136. # aggregated-metrics-reader.yaml
  137. vi aggregated-metrics-reader.yaml
  138. kind: ClusterRole
  139. apiVersion: rbac.authorization.k8s.io/v1
  140. metadata:
  141. name: system:aggregated-metrics-reader
  142. labels:
  143. rbac.authorization.k8s.io/aggregate-to-view: "true"
  144. rbac.authorization.k8s.io/aggregate-to-edit: "true"
  145. rbac.authorization.k8s.io/aggregate-to-admin: "true"
  146. rules:
  147. - apiGroups: ["metrics.k8s.io"]
  148. resources: ["pods"]
  149. verbs: ["get", "list", "watch"]
  150. # metrics-server-deployment.yaml
  151. vi metrics-server-deployment.yaml
  152. ---
  153. apiVersion: v1
  154. kind: ServiceAccount
  155. metadata:
  156. name: metrics-server
  157. namespace: kube-system
  158. ---
  159. apiVersion: apps/v1
  160. kind: Deployment
  161. metadata:
  162. name: metrics-server
  163. namespace: kube-system
  164. labels:
  165. k8s-app: metrics-server
  166. spec:
  167. selector:
  168. matchLabels:
  169. k8s-app: metrics-server
  170. template:
  171. metadata:
  172. name: metrics-server
  173. labels:
  174. k8s-app: metrics-server
  175. spec:
  176. serviceAccountName: metrics-server
  177. tolerations:
  178. - effect: NoSchedule
  179. key: node.kubernetes.io/unschedulable
  180. operator: Exists
  181. - key: NoSchedule
  182. operator: Exists
  183. effect: NoSchedule
  184. volumes:
  185. # mount in tmp so we can safely use from-scratch images and/or read-only containers
  186. - name: tmp-dir
  187. emptyDir: {}
  188. - name: metrics-server-certs
  189. secret:
  190. secretName: metrics-server-certs
  191. containers:
  192. - name: metrics-server
  193. image: juestnow/metrics-server-amd64:v0.3.3
  194. imagePullPolicy: Always
  195. command:
  196. - /metrics-server
  197. - --tls-cert-file=/certs/metrics-server.pem
  198. - --tls-private-key-file=/certs/metrics-server-key.pem
  199. - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
  200. - --kubelet-insecure-tls
  201. volumeMounts:
  202. - name: tmp-dir
  203. mountPath: /tmp
  204. - name: metrics-server-certs
  205. mountPath: /certs
  206. # 创建metrics-server 服务
  207. kubectl apply -f .
  208. # 验证metrics-server
  209. kubectl top node
  210. [root@master ~]# kubectl top node
  211. NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
  212. master 177m 7% 2057Mi 35%
  213. [root@master ~]# kubectl top pods -A
  214. NAMESPACE NAME CPU(cores) MEMORY(bytes)
  215. clusterstorage nfs-client-provisioner-5f6bc44cd7-fjr7f 3m 15Mi
  216. kube-system coredns-597b77445b-fhxvr 4m 23Mi
  217. kube-system kube-router-5tmgw 9m 16Mi
  218. kube-system metrics-server-66d78c47-zn679 1m 14Mi
  219. kube-system traefik-578574dfdb-dzl22 6m 41Mi
  220. kubernetes-dashboard dashboard-metrics-scraper-fb986f88d-rc6zs 1m 25Mi
  221. kubernetes-dashboard kubernetes-dashboard-668c4f84bc-w6vw6 2m 40Mi
  222. # 能够正常获取CPU 内存值

编译安装kubernetes 1.15.3
编译安装kubernetes 1.15.3

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/繁依Fanyi0/article/detail/561676
推荐阅读
相关标签
  

闽ICP备14008679号