Reference link:
https://blog.csdn.net/weixin_46703850/article/details/122922090
kubectl get namespace (or kubectl get ns)
kubectl get pods -A
kubectl get pods -n [namespace]
kubectl delete ns [namespace]
File name: createns.yaml
apiVersion: v1 # API version
kind: Namespace # resource type
metadata:
  name: hello
$ kubectl apply -f createns.yaml
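The same namespace can also be created imperatively, without a yaml file:
$ kubectl create namespace hello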
Understanding:
k8s first wraps containers into pods and then operates on pods; the Pod is the smallest deployable unit of an application in Kubernetes.
$ kubectl run [pod-name] --image=[image-name]
[root@k8s-mater ~]$ kubectl run mynginx --image=nginx
pod/mynginx created
[root@k8s-mater ~]$ kubectl get pod
NAME READY STATUS RESTARTS AGE
mynginx 1/1 Running 0 2m23s
$ kubectl describe pod [pod-name]
[root@k8s-mater ~]$ kubectl describe pod mynginx
Name:         mynginx
Namespace:    default # namespace the Pod lives in
Priority:     0
Node:         node2/192.168.64.131 # scheduled onto node2
Start Time:   Fri, 11 Feb 2022 20:47:48 -0500
Labels:       run=mynginx
Annotations:  cni.projectcalico.org/containerID: 1385a3cd6d332c41ce6d538001af9044c091d76e54870cb1d13e5f69cb84ce80
              cni.projectcalico.org/podIP: 172.31.104.4/32
              cni.projectcalico.org/podIPs: 172.31.104.4/32
Status:       Running
IP:           172.31.104.4 # Pod IP (k8s assigns every Pod its own IP)
IPs:
  IP:  172.31.104.4
# ...
On node2, run docker ps | grep mynginx and docker images | grep nginx to see the container and the nginx image.
Note:
any machine in the cluster, and any application on it, can reach this Pod via its assigned IP;
at this point, however, the Pod is not reachable from outside the cluster.
$ kubectl get pod -owide
[root@k8s-mater ~]$ kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
mynginx 1/1 Running 0 7m27s 172.31.104.4 node2 <none> <none>
curl the Pod IP; the nginx welcome page is returned:
[root@k8s-mater ~]$ curl 172.31.104.4
# ...
<h1>Welcome to nginx!</h1>
# ...
$ kubectl delete pod 【Pod名称】
[root@k8s-mater ~]$ kubectl delete pod mynginx
pod "mynginx" deleted
$ kubectl logs [pod-name]
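For example, to print the logs of the mynginx Pod created above, or to keep streaming them:
$ kubectl logs mynginx
$ kubectl logs -f mynginx # -f follows the log output continuously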
File name: creatpod.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: mynginx
  name: mynginx
  namespace: default # set the namespace here (or select/switch to the target namespace before creating)
spec:
  containers:
  - image: nginx
    name: mynginx
$ kubectl apply -f creatpod.yaml
$ kubectl delete -f creatpod.yaml
$ kubectl exec -it mynginx -- /bin/bash
[root@k8s-mater Downloads]# kubectl exec -it mynginx -- /bin/bash
root@mynginx:/# ls
bin dev docker-entrypoint.sh home lib64 mnt proc run srv tmp var
boot docker-entrypoint.d etc lib media opt root sbin sys usr
root@mynginx:/# cd /usr/share/nginx/html
root@mynginx:/usr/share/nginx/html# ls
50x.html index.html
root@mynginx:/usr/share/nginx/html# echo "Hello k8s" > index.html
root@mynginx:/usr/share/nginx/html# exit
exit
[root@k8s-mater Downloads]# curl 172.31.104.6
Hello k8s
Run kubectl get pod automatically every second:
$ watch -n 1 kubectl get pod
File name: creatpods.yaml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: myapp
  name: myapp
spec:
  containers:
  - image: nginx # container 1
    name: nginx
  - image: tomcat:8.5.68 # container 2
    name: tomcat
$ kubectl apply -f creatpods.yaml
[root@k8s-mater Downloads]# kubectl get pod
NAME READY STATUS RESTARTS AGE
myapp 0/2 ContainerCreating 0 14s
mynginx 1/1 Running 0 56m
Test access (the two containers share the Pod's network namespace, so a single Pod IP serves both ports):
$ curl 172.31.166.132 # returns the nginx welcome page
$ curl 172.31.166.132:8080 # returns the tomcat welcome page
Error case: creating two nginx containers in one pod
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: myapp2
  name: myapp2
spec:
  containers:
  - image: nginx
    name: nginx01
  - image: nginx
    name: nginx02
This fails: the containers share one network namespace, so nginx01 occupies port 80 and nginx02 cannot start.
$ kubectl delete pod myapp mynginx -n default
Explanation:
A Deployment manages pods, giving them multiple replicas, self-healing, and scale-up/scale-down capabilities.
$ kubectl run mynginx01 --image=nginx
[root@k8s-mater Downloads]# kubectl run mynginx01 --image=nginx
pod/mynginx01 created
[root@k8s-mater Downloads]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mynginx01 0/1 ContainerCreating 0 12s
[root@k8s-mater Downloads]# kubectl delete pods mynginx01
pod "mynginx01" deleted
[root@k8s-mater Downloads]# kubectl get pods
No resources found in default namespace.
$ kubectl create deployment mynginx02 --image=nginx
[root@k8s-mater Downloads]# kubectl create deployment mynginx02 --image=nginx
deployment.apps/mynginx02 created
[root@k8s-mater Downloads]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mynginx02-587cfb5b64-frmq9 0/1 ContainerCreating 0 6s
[root@k8s-mater Downloads]# kubectl delete pods mynginx02-587cfb5b64-frmq9
pod "mynginx02-587cfb5b64-frmq9" deleted
[root@k8s-mater Downloads]# kubectl get pods
NAME READY STATUS RESTARTS AGE
mynginx02-587cfb5b64-w48z6 0/1 ContainerCreating 0 11s
As you can see, after the pod is deleted, a new pod is brought up automatically.
[root@k8s-mater Downloads]# kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
mynginx02 1/1 1 1 18m
[root@k8s-mater Downloads]# kubectl delete deployment mynginx02
deployment.apps "mynginx02" deleted
[root@k8s-mater Downloads]# kubectl get pods
No resources found in default namespace. # deleted successfully
Deploy three replicas:
$ kubectl create deployment my-dep --image=nginx --replicas=3
[root@k8s-mater ~]# kubectl create deployment my-dep --image=nginx --replicas=3
deployment.apps/my-dep created
[root@k8s-mater ~]# kubectl get deployment
# name / ready = available/total / up-to-date = pods being updated / available = available pod count
NAME READY UP-TO-DATE AVAILABLE AGE
my-dep 2/3 3 2 32s
[root@k8s-mater ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
my-dep-5b7868d854-4b2rb 1/1 Running 0 38s
my-dep-5b7868d854-ffr7k 1/1 Running 0 38s
my-dep-5b7868d854-v4w7j 1/1 Running 0 38s
kubectl get pods -owide # shows in detail which worker node each pod runs on, the pod IP, and other info
[root@k8s-mater ~]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
my-dep-01-686cfb7bf-55p99 1/1 Running 0 38s 172.31.104.13 node2 <none> <none>
my-dep-01-686cfb7bf-7qfvm 1/1 Running 0 38s 172.31.166.140 node1 <none> <none>
my-dep-01-686cfb7bf-qvbz8 1/1 Running 0 38s 172.31.166.139 node1 <none> <none>
File name: deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: my-dep-02
  name: my-dep-02
spec:
  replicas: 3
  selector:
    matchLabels:
      app: my-dep-02
  template:
    metadata:
      labels:
        app: my-dep-02
    spec:
      containers:
      - image: nginx
        name: nginx
Deploy it with: kubectl apply -f deployment.yaml
Scaling up: e.g. mynginx pods originally ran only on node1 and node2; scaling up also places them on node3 and node4.
Scaling down: e.g. mynginx pods ran on node1, node2, node3, and node4; scaling down removes them from node3 and node4.
Dynamic scaling: let k8s decide by itself when to scale up or down (see the sketch below).
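A minimal sketch of dynamic scaling with kubectl autoscale, which creates a HorizontalPodAutoscaler (this assumes the metrics-server addon is installed, which this guide does not cover):
$ kubectl autoscale deployment my-dep-02 --min=2 --max=5 --cpu-percent=80
k8s will then keep my-dep-02 between 2 and 5 replicas, targeting 80% average CPU usage.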
Scale up to 5 replicas:
$ kubectl scale --replicas=5 deployment/my-dep-02
Scale down to 2 replicas:
$ kubectl scale --replicas=2 deployment/my-dep-02
Note:
scaling this way does not modify the actual config file!
Alternatively, change the replica count in the config file directly and re-apply it, which achieves the same effect.
$ kubectl edit deployment my-dep-02
Monitor the pods' status in real time:
$ watch -n 1 kubectl get pods -o wide
Every 1.0s: kubectl get pods -o wide Sat Feb 12 05:06:27 2022
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
my-dep-02-7b9d6bb69c-6fvz7 1/1 Running 0 74m 172.31.166.144 node1 <none> <none>
my-dep-02-7b9d6bb69c-dhjgr 1/1 Running 0 94m 172.31.104.16 node2 <none> <none>
my-dep-02-7b9d6bb69c-hzs8f 1/1 Running 0 107m 172.31.104.15 node2 <none> <none>
On node1, find the containers behind the pod and stop one to simulate a failure:
[root@node1 ~]# docker ps | grep my-dep-02-7b9d6bb69c-6fvz7
1a0f48f741dd nginx "/docker-entrypoint.…" About an hour ago Up About an hour k8s_nginx_my-dep-02-7b9d6bb69c-6fvz7_default_ae7938d7-cadc-439e-8d55-d25bde42431f_0
784bd86ee511 registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/pause:3.2 "/pause" About an hour ago Up About an hour k8s_POD_my-dep-02-7b9d6bb69c-6fvz7_default_ae7938d7-cadc-439e-8d55-d25bde42431f_0
[root@node1 ~]# docker stop 1a0f48f741dd
Then watch the monitor:
a few seconds later the pod is back to normal, showing one restart.
Now power off the node1 machine entirely:
after about n minutes (time enough to rule out a transient network failure), the pods are re-created on node2.
Note: this failover takes a relatively long time!
Watch and print pod status changes as they happen:
$ kubectl get pods -w
[root@k8s-mater ~]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
my-dep-02-7b9d6bb69c-6fvz7 1/1 Running 1 82m
my-dep-02-7b9d6bb69c-dhjgr 1/1 Running 0 102m
my-dep-02-7b9d6bb69c-hzs8f 1/1 Running 0 115m
my-dep-02-7b9d6bb69c-6fvz7 1/1 Terminating 1 86m
Explanation:
upgrade a group of pods from version V1 to V2 while it keeps serving traffic.
Update the image version of my-dep-02's pods; --record records the update in the rollout history:
$ kubectl set image deployment/my-dep-02 nginx=nginx:1.16.1 --record
[root@k8s-mater ~]# kubectl set image deployment/my-dep-02 nginx=nginx:1.16.1 --record
deployment.apps/my-dep-02 image updated
The rollout keeps starting new pods and then removing old ones (the command below only reports the rollout's progress; it performs no action itself):
$ kubectl rollout status deployment/my-dep-02
The update can also be made by editing the deployment's config file (look at the image field to see the version):
$ kubectl edit deployment/my-dep-02
First, view the revision history:
$ kubectl rollout history deployment/my-dep-02
[root@k8s-mater ~]# kubectl rollout history deployment/my-dep-02
deployment.apps/my-dep-02
REVISION CHANGE-CAUSE
1 <none>
2 <none>
3 kubectl set image deployment/my-dep-02 nginx=nginx:1.16.1 --record=true
View the details of one revision:
$ kubectl rollout history deployment/my-dep-02 --revision=3
Roll back to the previous revision:
$ kubectl rollout undo deployment/my-dep-02
Roll back to a specific revision:
$ kubectl rollout undo deployment/my-dep-02 --to-revision=1
[root@k8s-mater ~]# kubectl rollout undo deployment/my-dep-02 --to-revision=1
deployment.apps/my-dep-02 rolled back
Check the deployment's config file to confirm the current nginx image version:
[root@k8s-mater ~]# kubectl get deployment/my-dep-02 -o yaml | grep image
f:imagePullPolicy: {}
f:image: {}
- image: nginx
imagePullPolicy: Always
In real work, pods are operated through workloads, which give pods much stronger capabilities.
Besides Deployment, k8s has StatefulSet, DaemonSet, Job, and other resource types, collectively called workloads.
Stateful applications are deployed with StatefulSet; stateless applications with Deployment.
For example:
stateful deployment: the data in redis must not be lost, so redis needs a stateful deployment.
(Service, abbreviated svc)
So far, none of the containers in our pods are reachable from outside the cluster; Service solves this (--type=NodePort).
A Service is an abstraction that exposes a group of Pods as a network service.
Load balancing: requests are spread across multiple units (pods) for execution.
Service discovery: a registry records information about every service in the distributed system so that other services can quickly find the registered ones.
Service discovery example: if one pod crashes, the Service notices and stops forwarding requests to it; once the pod recovers, requests flow to it again.
Modify the welcome page of the nginx container in each of the 3 pods (these run inside each pod; a non-interactive sketch follows the commands):
cd /usr/share/nginx/html;echo "nginx-01" > index.html
cd /usr/share/nginx/html;echo "nginx-02" > index.html
cd /usr/share/nginx/html;echo "nginx-03" > index.html
cat index.html
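As a sketch, the same edit can also be done non-interactively from the master with kubectl exec (pod name taken from the listing below):
$ kubectl exec my-dep-02-7b9d6bb69c-7cw2q -- /bin/sh -c 'echo "nginx-01" > /usr/share/nginx/html/index.html'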
Test it:
[root@k8s-mater ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
my-dep-02-7b9d6bb69c-7cw2q 1/1 Running 0 108m 172.31.166.150 node1 <none> <none>
my-dep-02-7b9d6bb69c-qgqrh 1/1 Running 0 109m 172.31.166.149 node1 <none> <none>
my-dep-02-7b9d6bb69c-wjwkt 1/1 Running 0 108m 172.31.166.151 node1 <none> <none>
[root@k8s-mater ~]# curl 172.31.166.150
nginx-02
[root@k8s-mater ~]# curl 172.31.166.149
nginx-03
[root@k8s-mater ~]# curl 172.31.166.151
nginx-01
Expose the deployment's service and port; the port mapping creates a Service with its own IP (fronting the group of pods):
$ kubectl expose deployment my-dep-02 --port=8000 --target-port=80
Check the cluster IP (the cluster-IP range was configured at master init time via --service-cidr=10.96.0.0/16):
[root@k8s-mater ~]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 26h
my-dep-02 ClusterIP 10.96.171.182 <none> 8000/TCP 5m59s
Access the cluster IP (load balancing spreads the request load); it is reachable from anywhere inside the k8s cluster (including from inside pods), but not from the outside:
[root@k8s-mater ~]# curl 10.96.171.182:8000
nginx-02
[root@k8s-mater ~]# curl 10.96.171.182:8000
nginx-01
[root@k8s-mater ~]# curl 10.96.171.182:8000
nginx-03
View pod labels:
[root@k8s-mater ~]# kubectl get pod --show-labels
NAME READY STATUS RESTARTS AGE LABELS
my-dep-02-7b9d6bb69c-7cw2q 1/1 Running 0 126m app=my-dep-02,pod-template-hash=7b9d6bb69c
my-dep-02-7b9d6bb69c-qgqrh 1/1 Running 0 126m app=my-dep-02,pod-template-hash=7b9d6bb69c
my-dep-02-7b9d6bb69c-wjwkt 1/1 Running 0 126m app=my-dep-02,pod-template-hash=7b9d6bb69c
Filter pods by label:
$ kubectl get pod -l app=my-dep-02
[root@k8s-mater ~]# kubectl get pod -l app=my-dep-02
NAME READY STATUS RESTARTS AGE
my-dep-02-7b9d6bb69c-7cw2q 1/1 Running 0 112m
my-dep-02-7b9d6bb69c-qgqrh 1/1 Running 0 113m
my-dep-02-7b9d6bb69c-wjwkt 1/1 Running 0 112m
View the yaml config of service/my-dep-02:
$ kubectl get service/my-dep-02 -o yaml
Output:
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2022-02-12T12:55:38Z"
  labels:
    app: my-dep-02
  # ...
spec:
  clusterIP: 10.96.171.182
  clusterIPs:
  - 10.96.171.182
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
  selector:
    app: my-dep-02
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
Inside a pod, the Service group can be accessed by DNS name.
Name format: service-name.namespace.svc
e.g.: my-dep-02.default.svc (the full form is my-dep-02.default.svc.cluster.local; the suffix can usually be omitted)
Create a new pod to test (a sketch of one way to create it follows):
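How this test pod was created is not shown; one plausible way, as an assumption, is a throwaway tomcat Deployment plus an interactive shell inside its pod:
$ kubectl create deployment my-tomcat --image=tomcat:8.5.68
$ kubectl exec -it my-tomcat-5987455b6b-npkr6 -- /bin/bash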
root@my-tomcat-5987455b6b-npkr6:/usr/local/tomcat# curl my-dep-02.default.svc:8000
nginx-03
root@my-tomcat-5987455b6b-npkr6:/usr/local/tomcat# curl my-dep-02.default.svc:8000
nginx-02
root@my-tomcat-5987455b6b-npkr6:/usr/local/tomcat# curl my-dep-02.default.svc:8000
nginx-03
root@my-tomcat-5987455b6b-npkr6:/usr/local/tomcat# curl my-dep-02.default.svc:8000
nginx-01
curl my-dep-02:8000 also works here (the namespace can be omitted for default; for any other namespace it must be appended):
root@my-tomcat-5987455b6b-npkr6:/usr/local/tomcat# curl my-dep-02:8000
nginx-03
root@my-tomcat-5987455b6b-npkr6:/usr/local/tomcat# curl my-dep-02:8000
nginx-02
root@my-tomcat-5987455b6b-npkr6:/usr/local/tomcat# curl my-dep-02:8000
nginx-02
Delete the Service:
$ kubectl delete service my-dep-02
ClusterIP is the default, equivalent to passing no --type at all:
$ kubectl expose deployment my-dep-02 --port=8000 --target-port=80 --type=ClusterIP # with or without the flag, the type is ClusterIP
$ kubectl expose deployment my-dep-02 --port=8000 --target-port=80 --type=NodePort # to reach the pods' services from outside the cluster via node-ip:exposed-port (a yaml sketch follows)
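As a sketch, the NodePort Service can equivalently be declared in yaml, assembled from the values used above (k8s picks the nodePort from the 30000-32767 range unless one is set explicitly):
apiVersion: v1
kind: Service
metadata:
  name: my-dep-02
spec:
  type: NodePort
  selector:
    app: my-dep-02
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80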
View the services:
[root@k8s-mater ~]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 38h
my-dep-02 NodePort 10.96.241.166 <none> 8000:30670/TCP 57m
Every machine running the pods opens port 30670 (the NodePort range is 30000-32767).
First, access via Service IP + Service port:
[root@k8s-mater ~]# curl 10.96.241.166:8000
nginx-02
[root@k8s-mater ~]# curl 10.96.241.166:8000
nginx-01
[root@k8s-mater ~]# curl 10.96.241.166:8000
nginx-01
[root@k8s-mater ~]# curl 10.96.241.166:8000
nginx-03
Now access via node IP + the NodePort:
ip+port mapping: from outside the cluster, (master/node1/node2 IP):30670 maps to 10.96.241.166:8000
e.g., external access: http://192.168.64.128:30670/
Official docs: https://kubernetes.github.io/ingress-nginx/
Ingress: the unified gateway in front of Services (like Baidu's single domain fronting a whole Service layer). Ingress is the single entry point of the k8s cluster; request traffic passes through the Ingress first and only then enters the cluster to be served.
A Service gives a group of pods a unified in-cluster access point or a random external port; Ingress, in turn, routes incoming requests to the appropriate Service.
Services are generally for internal, in-cluster calls, while Ingress targets external callers.
A Service only opens a port, reachable via server-ip:port, but the server IP can still change; Ingress acts as a gateway doing the forwarding.
With many services, having no single entry point makes management hard.
An ingress rule defines routing rules; the ingress controller dynamically writes those rules into a third-party load balancer (e.g. nginx).
The third-party load balancer then routes traffic to the right pods; Ingress's main job is dynamically pushing routing rules into that load balancer.
Download the ingress yaml file to install it into k8s:
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v0.47.0/deploy/static/provider/baremetal/deploy.yaml
If the download fails, use the yaml content below instead.
Once downloaded, apply deploy.yaml:
$ kubectl apply -f deploy.yaml
Check the installation result:
$ kubectl get pod,svc -n ingress-nginx
Check the installation status:
[root@k8s-mater Downloads]# kubectl get pod -A | grep ingress
ingress-nginx ingress-nginx-admission-create-nnx47 0/1 Completed 0 68s
ingress-nginx ingress-nginx-admission-patch-r9xxb 0/1 Completed 0 68s
ingress-nginx ingress-nginx-controller-65bf56f7fc-njsmd 0/1 ContainerCreating 0 68s
[root@k8s-mater ~]# kubectl get service -A | grep ingress
ingress-nginx ingress-nginx-controller NodePort 10.96.105.233 <none> 80:30813/TCP,443:31761/TCP 27m
ingress-nginx ingress-nginx-controller-admission ClusterIP 10.96.129.25 <none> 443/TCP 27m
Test access to the ingress service we installed and exposed:
access server-ip:30813 and server-ip:31761
mapping: 80:30813/TCP, 443:31761/TCP
all servers open ports 30813 and 31761
port 30813 handles HTTP requests (HTTP's TCP port is 80)
port 31761 handles HTTPS requests (HTTPS's TCP port is 443)
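Since no Ingress rules exist yet, a request to the HTTP NodePort should be answered by the controller's default backend, an nginx 404 page, which at least confirms the controller is reachable (a sketch, using the master's IP from earlier):
$ curl http://192.168.64.128:30813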
Note: this Ingress is itself built on nginx.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hello-server
spec:
  replicas: 2
  selector:
    matchLabels:
      app: hello-server
  template:
    metadata:
      labels:
        app: hello-server
    spec:
      containers:
      - name: hello-server
        image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/hello-server
        ports:
        - containerPort: 9000
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
      - image: nginx
        name: nginx
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: nginx-demo
  name: nginx-demo
spec:
  type: NodePort
  selector:
    app: nginx-demo
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hello-server
  name: hello-server
spec:
  type: NodePort
  selector:
    app: hello-server
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 9000
The test yaml file is named: ingresstest.yaml
Applying the config file deploys 2 Deployments and 2 Services:
$ kubectl apply -f ingresstest.yaml
View the services:
[root@k8s-mater Downloads]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
hello-server ClusterIP 10.96.61.143 <none> 8000/TCP 2m48s
nginx-demo ClusterIP 10.96.252.133 <none> 8000/TCP 2m48s
# ...
[root@k8s-mater Downloads]# curl 10.96.61.143:8000
Hello World!
[root@k8s-mater Downloads]# curl 10.96.252.133
# ...
<h1>Welcome to nginx!</h1>
# ...
View the deployments:
[root@k8s-mater Downloads]# kubectl get deployment -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
hello-server 2/2 2 2 3m47s hello-server registry.cn*** app=hello-server
nginx-demo 2/2 2 2 3m47s nginx nginx app=nginx-demo
View the pods:
[root@k8s-mater Downloads]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
hello-server-6cbb679d85-pf7jx 1/1 Running 0 13m 172.31.166.158 node1 <none> <none>
hello-server-6cbb679d85-z7b65 1/1 Running 0 13m 172.31.166.159 node1 <none> <none>
nginx-demo-7d56b74b84-2xjh4 1/1 Running 0 13m 172.31.166.160 node1 <none> <none>
nginx-demo-7d56b74b84-hskdx 1/1 Running 0 13m 172.31.104.24 node2 <none> <none>
Goal:
requests to hello.test.com are handled by the hello-server Service (group);
requests to demo.test.com are handled by the nginx-demo Service (group);
the Ingress (gateway) dispatches each request to the matching Service by domain.
Config file: ingresscom.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress # resource type
metadata:
  name: ingress-host-bar
spec:
  ingressClassName: nginx
  rules:
  - host: "hello.test.com" # domain
    http:
      paths:
      - pathType: Prefix # prefix match
        path: "/"
        backend:
          service:
            name: hello-server # Service name
            port:
              number: 8000 # port
  - host: "demo.test.com"
    http:
      paths:
      - pathType: Prefix
        path: "/nginx" # requests are handed to the service below, which must be able to serve this path itself, otherwise 404
        backend:
          service:
            name: nginx-demo # e.g. a Java service; use path rewriting to strip the nginx prefix
            port:
              number: 8000
Apply the config file:
$ kubectl apply -f ingresscom.yaml
[root@k8s-mater Downloads]# kubectl apply -f ingresscom.yaml
ingress.networking.k8s.io/ingress-host-bar created
Check the gateway (ingress) created by applying the ingresscom file:
[root@k8s-mater Downloads]# kubectl get ingress
NAME CLASS HOSTS ADDRESS PORTS AGE
ingress-host-bar nginx hello.test.com,demo.test.com 192.168.64.130 80 30s
Add hosts entries on the client machine (e.g. in /etc/hosts) so the test domains resolve to a cluster node:
192.168.64.128 hello.test.com
192.168.64.128 demo.test.com
Access tests:
http://hello.test.com:30813/
https://demo.test.com:31761/ — the nginx page here is returned by the Ingress layer itself
https://demo.test.com:31761/nginx — the nginx response here is returned by a pod of nginx-demo
Modify the Ingress config, changing path: "/nginx" to path: "/nginx.html":
$ kubectl edit ingress ingress-host-bar
Enter the nginx container of the nginx-demo pod and create the nginx file:
kubectl exec -it podname -- /bin/bash
cd /usr/share/nginx/html;echo "<h1>Hello nginx2</h1>" > nginx.html;ls
Summary of the Ingress setup
Environment setup for path rewriting
Modify the config file ingresscom.yaml:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations: # enables the path-rewriting feature
    nginx.ingress.kubernetes.io/rewrite-target: /$2
  name: ingress-host-bar
spec:
  ingressClassName: nginx # this stays nginx; best not to change it
  rules:
  - host: "hello.test.com"
    http:
      paths:
      - pathType: Prefix
        path: "/"
        backend:
          service:
            name: hello-server
            port:
              number: 8000
  - host: "demo.test.com"
    http:
      paths:
      - pathType: Prefix
        path: "/nginx(/|$)(.*)" # strips the /nginx prefix
        backend:
          service:
            name: nginx-demo
            port:
              number: 8000
Re-apply:
$ kubectl apply -f ingresscom.yaml
[root@k8s-mater Downloads]# kubectl apply -f ingresscom.yaml
ingress.networking.k8s.io/ingress-host-bar configured
Effect:
https://demo.test.com:31761/nginx is now equivalent to https://demo.test.com:31761/
https://demo.test.com:31761/nginx/ is also equivalent to https://demo.test.com:31761/
https://demo.test.com:31761/nginx/nginx.html is equivalent to https://demo.test.com:31761/nginx.html
docker can bind-mount directories, but mounting directly leaves many scattered mount directories that are hard to manage, and once a failover happens, the relocated pod's mount directory does not exist on the new host.
k8s's solution: the servers' mount directories are combined into a storage layer that k8s manages uniformly; the storage technology behind it is pluggable (GlusterFS, NFS, CephFS, etc.).
Install the NFS utilities on all servers (nodes):
yum install -y nfs-utils
On the master node:
expose the directory and open up its permissions
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
Create the directory:
mkdir -p /nfs/data
Start rpcbind (remote binding) now and enable it on boot:
systemctl enable rpcbind --now
Reload the exports config so it takes effect:
exportfs -r
Check the config:
[root@k8s-mater ~]# exportfs
/nfs/data <world>
On every server where NFS is installed, start the nfs service:
sudo systemctl start nfs-server
sudo systemctl enable nfs-server
sudo systemctl status nfs-server
On the worker nodes (node1, node2):
list the available mount points: showmount -e master-ip
showmount -e 192.168.64.128
On a public cloud server, if nothing is listed or the command times out, open the ports in the provider's console.
[root@node1 ~]# showmount -e 192.168.64.128
Export list for 192.168.64.128:
/nfs/data *
Create the directory
on your worker node:
mkdir -p /nfs/data
Mount (the following command mounts the shared directory on the NFS server onto the local path /nfs/data)
run on your worker node, choosing which directory of which NFS server (the master node) to mount:
mount -t nfs 192.168.64.128:/nfs/data /nfs/data
Write a test file (> overwrites, >> appends)
run on the worker node; afterwards the file also exists in 192.168.64.128:/nfs/data on the master:
echo "hello nfs server" > /nfs/data/test.txt
To have the test file on other worker nodes too, mount the (master) 192.168.64.128:/nfs/data directory on those nodes as well:
mount -t nfs 192.168.64.128:/nfs/data /nfs/data
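Mounts made with mount -t nfs do not survive a reboot; as a sketch, a line like the following in /etc/fstab makes the mount permanent (adjust the server IP and paths to your setup):
192.168.64.128:/nfs/data /nfs/data nfs defaults 0 0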
Question:
if the (master) 192.168.64.128:/nfs/data server goes down, does mounting become impossible?
Answer: yes, the whole NFS mount setup is unusable, so NFS itself should be deployed highly available.
cat /nfs/data/test.txt
Create the directory:
mkdir /nfs/data/nginx-pv/
Because the directories are mounted to one another, creating it on the master or on any single worker node is enough to create it everywhere.
Config file: mountnfs.yaml
the container path /usr/share/nginx/html is mapped to the server path /nfs/data/nginx-pv
2 pods mount the same path
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-pv-demo
  name: nginx-pv-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pv-demo
  template:
    metadata:
      labels:
        app: nginx-pv-demo
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
      - name: html
        nfs:
          server: 192.168.64.128
          path: /nfs/data/nginx-pv/
Apply the config file:
$ kubectl apply -f mountnfs.yaml
Test: the mount works
[root@k8s-mater Downloads]# cd /nfs/data/nginx-pv/
[root@k8s-mater nginx-pv]# echo "Hello Mount" > index.html
Inside one of the pods:
root@nginx-pv-demo-fc6c6dd8d-smhzx:/# cd /usr/share/nginx/html
root@nginx-pv-demo-fc6c6dd8d-smhzx:/usr/share/nginx/html# ls
index.html
root@nginx-pv-demo-fc6c6dd8d-smhzx:/usr/share/nginx/html# cat index.html
Hello Mount
[root@k8s-mater nginx-pv]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-pv-demo-fc6c6dd8d-smhzx 1/1 Running 0 4m27s 172.31.166.166 node1 <none> <none>
nginx-pv-demo-fc6c6dd8d-vnwdc 1/1 Running 0 4m27s 172.31.166.165 node1 <none> <none>
[root@k8s-mater nginx-pv]# curl 172.31.166.166
Hello Mount
Note: using PV here requires NFS!!!
1. First make sure the tools are installed: yum install -y nfs-utils rpcbind
2. Decide which local directory to share
3. Add the local directory and its permissions to /etc/exports
e.g.:
/nfs/data/ *(insecure,rw,sync,no_root_squash)
/pvcnfs/ *(insecure,rw,sync,no_root_squash)
Then run (reloads the config so it takes effect):
#exportfs -rv
Then run (lists the shares published by this machine):
#showmount -e
4. When starting the NFS share manually, start rpcbind first, then nfs:
systemctl start rpcbind && systemctl enable rpcbind
systemctl start nfs && systemctl enable nfs
5. Check that the rpcbind port is open; rpcbind uses TCP port 111 by default:
netstat -anpt | grep rpcbind
6. In pv.yaml:
storageClassName: nfs # the storage class name can be customized
After that, PVs can mount and use it!
Raw NFS data mounting has some problems:
directories must be created by hand;
after a Deployment and its pods are deleted, the data in the server directory remains;
there is no limit on mount capacity.
PV: Persistent Volume — saves an application's data that must persist to a designated place (the directory holding the persisted data is the persistent volume).
PVC: Persistent Volume Claim — declares the persistent-volume specs an application needs (the application form for a persistent volume).
Static provisioning: the location and size are specified in advance.
Dynamic provisioning: the location and size of the PV are created automatically.
Create the PV pool
on the NFS master node:
mkdir -p /nfs/data/01
mkdir -p /nfs/data/02
mkdir -p /nfs/data/03
Create three PVs (static provisioning); config file createPV.yaml:
apiVersion: v1
kind: PersistentVolume # resource type
metadata:
  name: pv01-10m # name
spec:
  capacity:
    storage: 10M # volume size
  accessModes:
  - ReadWriteMany # readable and writable by many nodes
  storageClassName: nfs # storage class name
  nfs:
    path: /nfs/data/01 # PV directory
    server: 192.168.64.128
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv02-1gi
spec:
  capacity:
    storage: 1Gi # volume size
  accessModes:
  - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/02 # PV directory
    server: 192.168.64.128
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv03-3gi
spec:
  capacity:
    storage: 3Gi # volume size
  accessModes:
  - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/03 # PV directory
    server: 192.168.64.128
Apply:
[root@k8s-mater Downloads]# kubectl apply -f createPV.yaml
persistentvolume/pv01-10m created
persistentvolume/pv02-1gi created
persistentvolume/pv03-3gi created
View the PVs:
[root@k8s-mater Downloads]# kubectl get persistentvolume
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv01-10m 10M RWX Retain Available nfs 26s
pv02-1gi 1Gi RWX Retain Available nfs 26s
pv03-3gi 3Gi RWX Retain Available nfs 26s
Create the PVC
createPVC.yaml
kind: PersistentVolumeClaim # resource type
apiVersion: v1
metadata:
  name: nginx-pvc # PVC name
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 200Mi # requested size; k8s automatically finds a matching PV based on it
  storageClassName: nfs # must match the PV's storageClassName
Apply:
[root@k8s-mater Downloads]# kubectl apply -f createPVC.yaml
persistentvolumeclaim/nginx-pvc created
Check the PVC: it bound pv02-1gi, whose directory is /nfs/data/02:
[root@k8s-mater Downloads]# kubectl get persistentvolumeclaim
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
nginx-pvc Bound pv02-1gi 1Gi RWX nfs 17s
Check the PVs: status Bound means the volume is in use; the claim reads default/nginx-pvc => namespace/PVC-name (binding picks the smallest PV that is still big enough):
[root@k8s-mater Downloads]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv01-10m 10M RWX Retain Available nfs 14m
# status: Bound
pv02-1gi 1Gi RWX Retain Bound default/nginx-pvc nfs 14m
pv03-3gi 3Gi RWX Retain Available nfs 14m
Config file: boundPVC.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-deploy-pvc # Deployment name
  name: nginx-deploy-pvc
spec:
  replicas: 2 # pod count
  selector:
    matchLabels:
      app: nginx-deploy-pvc
  template:
    metadata:
      labels:
        app: nginx-deploy-pvc
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html # mount path inside the container
      volumes:
      - name: html
        persistentVolumeClaim:
          claimName: nginx-pvc # PVC name
Apply:
[root@k8s-mater Downloads]# kubectl apply -f boundPVC.yaml
deployment.apps/nginx-deploy-pvc created
Test
write a test file into the mounted directory /nfs/data/02:
[root@k8s-mater Downloads]# cd /nfs/data/02
[root@k8s-mater 02]# echo "boundPVC test nginx" > index.html
Access works:
[root@k8s-mater 02]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deploy-pvc-79fc8558c7-26grh 1/1 Running 0 15m 172.31.166.168 node1 <none> <none>
nginx-deploy-pvc-79fc8558c7-2gz4d 1/1 Running 0 15m 172.31.166.169 node1 <none> <none>
[root@k8s-mater 02]# curl 172.31.166.168
boundPVC test nginx
That sums up PV & PVC.
ConfigMap is abbreviated cm.
ConfigMap (configuration set): used to mount configuration files, pulling app config out of the application, with automatic updates.
Example with redis:
Create redis.conf with the content:
appendonly yes
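For example, the file can be created with a single shell command:
$ echo "appendonly yes" > redis.conf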
Turn the config file created above into a ConfigMap.
Create the config; redis.conf is saved into k8s's etcd (the k8s data store):
$ kubectl create configmap redis-conf --from-file=redis.conf
[root@k8s-mater Downloads]# kubectl create configmap redis-conf --from-file=redis.conf
configmap/redis-conf created
View the ConfigMaps:
[root@k8s-mater Downloads]# kubectl get configmap
NAME DATA AGE
kube-root-ca.crt 1 2d14h
redis-conf 1 31s
View the config data of the redis-conf ConfigMap:
$ kubectl get configmap redis-conf -o yaml
[root@k8s-mater Downloads]# kubectl get configmap redis-conf -o yaml
apiVersion: v1
data: # data holds the actual payload; each key defaults to the file name, the value is the file's content
  redis.conf: |
    appendonly yes
kind: ConfigMap # resource type
metadata:
  # ...
  name: redis-conf
  namespace: default
# ...
Create the pod
Config file: cm01.yaml
apiVersion: v1
kind: Pod # resource type
metadata:
  name: redis # pod name
spec:
  containers:
  - name: redis
    image: redis # image
    command:
    - redis-server
    - "/redis-master/redis.conf" # path inside the redis container
    ports:
    - containerPort: 6379
    volumeMounts: # volume mounts
    - mountPath: /data
      name: data # refers to the volume named data below
    - mountPath: /redis-master
      name: config # refers to the volume named config below
  volumes: # volumes
  - name: data
    emptyDir: {} # created when the Pod starts, readable/writable by its containers; removed with its data when the Pod leaves the node
  - name: config # matches the mount above
    configMap: # the ConfigMap
      name: redis-conf # must match the ConfigMap's name
      items:
      - key: redis.conf # the key in redis-conf whose value is mounted to the path below
        path: redis.conf # file name to create inside the mounted directory
The path /redis-master has the ConfigMap entry redis.conf mounted into it.
Apply:
$ kubectl apply -f cm01.yaml
Check the default config:
root@redis:/data# cat /redis-master/redis.conf
appendonly yes
Edit the config data of the redis-conf ConfigMap, adding requirepass 123456:
$ kubectl edit configmap redis-conf
Check whether the config updated; after the cm changes, the config file inside the Pod follows along:
root@redis:/data# cat /redis-master/redis.conf
appendonly yes
requirepass 123456
If the running config values have not changed, it is because the Pod must be restarted to pick up the updated values from the associated ConfigMap.
Reason: the middleware running in our Pod has no hot-reload capability of its own.
Enter Redis inside the pod and check the config:
$ kubectl exec -it redis -- redis-cli
127.0.0.1:6379> CONFIG GET appendonly
"appendonly"
"yes"
127.0.0.1:6379> CONFIG GET requirepass
"requirepass"
""
Alternatively, a ConfigMap can be created from yaml, which is more convenient:
config-map-redis.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-config
data:
  redis-config: |+
    requirepass 123456
Apply it to k8s as usual: kubectl apply -f config-map-redis.yaml