```bash
# CentOS 7.9; machines with 4 cores / 8 GB RAM are recommended
sudo yum remove docker*
sudo yum install -y yum-utils

# Configure the Docker yum repository
sudo yum-config-manager \
  --add-repo \
  http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Install a specific version
sudo yum install -y docker-ce-20.10.7 docker-ce-cli-20.10.7 containerd.io-1.4.6

# Start Docker and enable it on boot
systemctl enable docker --now

# Configure a registry mirror for Docker
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://82m9ar63.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
```
First prepare the base environment: every machine must be able to reach the others over its private network IP, and each machine needs its own hostname (do not use localhost).
```bash
# Set each machine's own hostname: k8s-master / node1 / node2
hostnamectl set-hostname xxx

# Set SELinux to permissive mode (effectively disabling it)
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

# Stop the firewall; on cloud hosts the private network must remain open between nodes
systemctl stop firewalld
# Disable the firewalld service
systemctl disable firewalld

# Turn off swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

# Allow iptables to see bridged traffic
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
```
Next install kubelet, kubeadm and kubectl.
```bash
# Configure the Kubernetes yum repository
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install kubelet, kubeadm, kubectl
sudo yum install -y kubelet-1.21.9 kubeadm-1.21.9 kubectl-1.21.9

# Start kubelet
sudo systemctl enable --now kubelet

# On every machine, add host entries for the master and workers
# (append rather than overwrite /etc/hosts; change the IPs to your own)
cat >> /etc/hosts <<EOF
192.168.249.146 k8s-master
192.168.249.147 node1
192.168.249.148 node2
EOF
```
Initialize the master node.
```bash
# Adjust the IP and hostname to your own environment
kubeadm init \
  --apiserver-advertise-address=192.168.249.146 \
  --control-plane-endpoint=k8s-master \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.21.9 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16

# Save the information printed at the end, then set up kubectl for the current user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Install the Calico network plugin
curl https://docs.projectcalico.org/v3.20/manifests/calico.yaml -O
# If the download fails, fetch it manually from
# https://github.com/projectcalico/calico/releases
kubectl apply -f calico.yaml
# If you downloaded manually and the images cannot be pulled, import
# calico-cni / calico-node / calico-pod2daemon-flexvol on every node;
# the master additionally needs calico-kube-controllers.

# Finally, join the worker nodes to the cluster with the "kubeadm join" command printed by kubeadm init.

# To reset a node: first remove it on the master
kubectl delete node node2
kubectl get node
# then run the reset on node2 itself
kubeadm reset
```
Install the NFS server.
```bash
# On every machine
yum install -y nfs-utils

# On the master, export the share
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports

# Create the shared directory
mkdir -p /nfs/data

# On the master, start the NFS service
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server

# Apply the export configuration
exportfs -r
# Check that it took effect
exportfs

# Configure an NFS client (optional)
# Run on non-master nodes; change the IP to your master's
showmount -e 192.168.249.146
mkdir -p /nfs/data
mount -t nfs 192.168.249.146:/nfs/data /nfs/data
```
Configure the default StorageClass. On the master, create the manifest with `vim sc.yaml`, and change the NFS address to your own:
```yaml
## Create a StorageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"  ## whether the PV's contents are archived when the PV is deleted
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-beijing.aliyuncs.com/kubesphereio/nfs-subdir-external-provisioner:v4.0.2
          # resources:
          #   limits:
          #     cpu: 10m
          #   requests:
          #     cpu: 10m
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.249.146   ## your own NFS server address
            - name: NFS_PATH
              value: /nfs/data         ## directory shared by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.249.146
            path: /nfs/data
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
```
Check that the configuration took effect with `kubectl get sc`. From then on the cluster can provision PVs dynamically; you can test it yourself with the PersistentVolumeClaim below and the apply/verify sketch that follows it.
```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 3Gi
```
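A minimal way to exercise the StorageClass, assuming the claim above is saved as pvc.yaml (the file name is just an example):

```bash
# Apply the claim and confirm that a PV is provisioned for it automatically
kubectl apply -f pvc.yaml
kubectl get pvc my-pvc        # STATUS should turn Bound
kubectl get pv                # a PV created by the nfs provisioner should appear
kubectl delete -f pvc.yaml    # clean up afterwards
```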
metrics-server is the cluster metrics/monitoring component (a third-party image mirror is used here). Create it with `vim metrics.yaml`:
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
  - apiGroups:
      - metrics.k8s.io
    resources:
      - pods
      - nodes
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
      - configmaps
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
        - args:
            - --cert-dir=/tmp
            - --kubelet-insecure-tls
            - --secure-port=4443
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
            - --kubelet-use-node-status-port
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/metrics-server:v0.4.3
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /livez
              port: https
              scheme: HTTPS
            periodSeconds: 10
          name: metrics-server
          ports:
            - containerPort: 4443
              name: https
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /readyz
              port: https
              scheme: HTTPS
            periodSeconds: 10
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          volumeMounts:
            - mountPath: /tmp
              name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
        - emptyDir: {}
          name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
```
Test it:

```bash
kubectl top nodes
kubectl top pod -A
```
Installation reference: https://kubesphere.com.cn/docs/v3.3/quick-start/minimal-kubesphere-on-k8s/
```bash
# This step needs access to sources outside China (GitHub)
# Download the core files
wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.1/kubesphere-installer.yaml
wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.1/cluster-configuration.yaml

kubectl apply -f kubesphere-installer.yaml

# Optional components can be selected before applying this file; the default is a minimal install.
# To enable a feature, change its "false" to "true" in cluster-configuration.yaml.
# Which features can be enabled is described in "Enable Pluggable Components":
# https://kubesphere.com.cn/docs/pluggable-components/overview/
kubectl apply -f cluster-configuration.yaml

# Follow the installation logs
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f

# After it succeeds, open port 30880 on any node
# Account:  admin
# Password: P@88w0rd

# Fix the "etcd monitoring certificate not found" issue
kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs \
  --from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt \
  --from-file=etcd-client.crt=/etc/kubernetes/pki/apiserver-etcd-client.crt \
  --from-file=etcd-client.key=/etc/kubernetes/pki/apiserver-etcd-client.key
```
Official reference (all-in-one on Linux): https://kubesphere.com.cn/docs/v3.3/quick-start/all-in-one-on-linux/
```bash
# Set the hostname first
hostnamectl set-hostname node1

# Prepare KubeKey (use the CN mirror inside China)
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v2.3.0 sh -

# Make it executable
chmod +x kk

# Start the installation
# ./kk create cluster [--with-kubernetes version] [--with-kubesphere version]
./kk create cluster --with-kubernetes v1.22.12 --with-kubesphere v3.3.1

# Verify the installation
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
# The output shows the web console IP address and port; the default NodePort is 30880.
# Log in at <NodeIP>:30880 with the default account and password (admin / P@88w0rd).
# Use "kubectl get pod --all-namespaces" to check the state of the components.
```
This applies only to the default minimal installation. To enable other components in KubeSphere, see "Enable Pluggable Components".
The components can also be enabled from the console after installation: log in as admin, click Platform in the top-left corner, choose Cluster Management, open CRDs, search for clusterconfiguration in the search bar, and edit it to switch on the plugins you need. A CLI sketch follows.
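Roughly the same thing can be done from the command line by editing the ks-installer ClusterConfiguration, which is the approach the KubeSphere docs describe; a sketch:

```bash
# Flip the desired component from "false" to "true" (e.g. devops.enabled), then watch the installer
kubectl edit clusterconfiguration ks-installer -n kubesphere-system
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') -f
```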
Official reference (multi-node installation): https://kubesphere.com.cn/docs/v3.3/installing-on-linux/introduction/multioverview/
```bash
# A minimal install needs at least 2 cores / 4 GB per machine
# (with better hardware, master 4c8g and nodes 8c16g are recommended)

# Create the cluster with KubeKey
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v2.3.0 sh -
chmod +x kk

# Create a cluster configuration file
# ./kk create config [--with-kubernetes version] [--with-kubesphere version] [(-f | --file) path]
./kk create config --with-kubernetes v1.21.4 --with-kubesphere v3.3.1

# This generates a configuration file. Edit it for the three hosts:
# mainly set the IPs and assign the master/node roles, for example:
# ===================================================================
# "address" is the public address, "internalAddress" the instance's private IP;
# on these VMs both are set to the private address.
spec:
  hosts:
  - {name: master, address: 192.168.249.146, internalAddress: 192.168.249.146, user: root, password: " "}
  - {name: node1, address: 192.168.249.147, internalAddress: 192.168.249.147, user: root, password: " "}
  - {name: node2, address: 192.168.249.148, internalAddress: 192.168.249.148, user: root, password: " "}
  roleGroups:
    etcd:
    - master
    control-plane:
    - master
    worker:
    - node1
    - node2
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    # internalLoadbalancer: haproxy
# ========================================================================

# Create the cluster from the configuration file.
# Missing dependencies may be reported; install them with yum as prompted.
yum install -y conntrack socat
./kk create cluster -f config-sample.yaml

# When it finishes, open <NodeIP>:30880 and log in to the KubeSphere web console
# with the default account and password (admin / P@88w0rd).
# To check progress (on this version kubectl may only be usable via the web console):
# kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
```
Reference: https://kubesphere.com.cn/docs/v3.3/quick-start/create-workspace-and-project/
First the platform administrator admin creates the users: a workspace manager bigboss-li and a number of ordinary members. Logged in as bigboss-li, create the workspaces (for example "Hangzhou"); this account can manage every workspace, and in each workspace it invites one user to be that workspace's administrator, for example hangzhou-boss, who in turn can invite other users as ordinary members.
Then hangzhou-boss invites pm-wang as project director. wang can create multiple projects and invite people from the workspace into them with different roles, for example project administrator; a project administrator can in turn invite workspace members into the project and grant roles such as developer or viewer.
Official quick-start example (WordPress deployment): https://kubesphere.com.cn/docs/v3.3/quick-start/wordpress-deployment/
Docker Hub: https://hub.docker.com/
```
# Plain Docker deployment, for reference
docker run -p 3306:3306 --name mysql-01 \
  -v /mydata/mysql/log:/var/log/mysql \
  -v /mydata/mysql/data:/var/lib/mysql \
  -v /mydata/mysql/conf:/etc/mysql/conf.d \
  -e MYSQL_ROOT_PASSWORD=root \
  --restart=always \
  -d mysql:5.7

# Log in as dev-lu first; the administrator has already created the "mail" project

# Example MySQL configuration
[client]
default-character-set=utf8mb4
[mysql]
default-character-set=utf8mb4
[mysqld]
init_connect='SET collation_connection = utf8mb4_unicode_ci'
init_connect='SET NAMES utf8mb4'
character-set-server=utf8mb4
collation-server=utf8mb4_unicode_ci
skip-character-set-client-handshake
skip-name-resolve
lower_case_table_names=1
```
In KubeSphere, open the project first (here the project is mail). Create the configuration (Configuration → ConfigMaps; any name, with a key such as my.cnf whose value is the configuration above). Then create the storage (a PersistentVolumeClaim; a local type is fine for this test). Finally create a StatefulSet: pick the mysql image, keep the default container settings but tick the environment-variable and time-synchronization options, set MYSQL_ROOT_PASSWORD=root, optionally limit resources, and mount the volume and the config file. If creation keeps failing, change the Docker registry mirror. A CLI sketch of the ConfigMap step follows.
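For reference, the ConfigMap step can also be done from the command line; a minimal sketch, assuming the configuration above is saved locally as my.cnf and the project/namespace is mail (the ConfigMap name mysql-conf is just an example):

```bash
# Create the ConfigMap that will later be mounted into /etc/mysql/conf.d
kubectl create configmap mysql-conf -n mail --from-file=my.cnf=./my.cnf
kubectl describe configmap mysql-conf -n mail
```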
Note that inside the cluster an application is reachable directly at service-name.project-name by default (the project is the namespace); you can try to ping it.
If you want a different internal DNS name, delete the default Service and create two Services yourself, one for in-cluster access and one for external access (make sure to choose "specify workload" when creating them); you can also deploy directly from the Services page. A quick DNS check is sketched below.
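A quick way to confirm that the in-cluster DNS name resolves, assuming the project is mail and the Service is named mysql-01 (both names are examples, adjust to your own):

```bash
# Resolve the service name from a throwaway pod inside the cluster
kubectl run dns-test -n mail --rm -it --image=busybox:1.28 --restart=Never -- nslookup mysql-01.mail
```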
```
# ================ The old (plain Docker) deployment ================
# 1. Prepare the redis configuration file
mkdir -p /mydata/redis/conf && vim /mydata/redis/conf/redis.conf

# Example configuration
appendonly yes
port 6379
bind 0.0.0.0

# Start redis with Docker
docker run -d -p 6379:6379 --restart=always \
  -v /mydata/redis/conf/redis.conf:/etc/redis/redis.conf \
  -v /mydata/redis-01/data:/data \
  --name redis-01 redis:6.2.5 \
  redis-server /etc/redis/redis.conf
```
Deploying it in KubeSphere is similar to MySQL, but for a StatefulSet it is better to create the storage inside the workload itself: each new replica then gets its own PVC, instead of all replicas sharing one. A manifest sketch follows.
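What this amounts to in manifest form is a StatefulSet with volumeClaimTemplates; a minimal sketch under assumed names (redis-demo, namespace mail, StorageClass nfs-storage):

```bash
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-demo
  namespace: mail
spec:
  serviceName: redis-demo        # normally paired with a headless Service of the same name
  replicas: 2
  selector:
    matchLabels:
      app: redis-demo
  template:
    metadata:
      labels:
        app: redis-demo
    spec:
      containers:
        - name: redis
          image: redis:6.2.5
          volumeMounts:
            - name: data
              mountPath: /data
  volumeClaimTemplates:          # one PVC per replica: data-redis-demo-0, data-redis-demo-1, ...
    - metadata:
        name: data
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: nfs-storage
        resources:
          requests:
            storage: 1Gi
EOF
```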
```bash
# Create the data directory
mkdir -p /mydata/es-01 && chmod 777 -R /mydata/es-01

# Start the container
docker run --restart=always -d -p 9200:9200 -p 9300:9300 \
  -e "discovery.type=single-node" \
  -e ES_JAVA_OPTS="-Xms512m -Xmx512m" \
  -v es-config:/usr/share/elasticsearch/config \
  -v /mydata/es-01/data:/usr/share/elasticsearch/data \
  --name es-01 \
  elasticsearch:7.13.4
```
Because the /usr/share/elasticsearch/config directory already contains many default files, the configuration cannot be mounted over the whole directory, or the defaults would be wiped out. When creating the replicas, mount the config with a subPath so that only the specific file is overridden; a manifest sketch follows.
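In manifest form the single-file subPath mount looks roughly like this (pod, ConfigMap and namespace names are illustrative; the ConfigMap is assumed to contain a key named elasticsearch.yml):

```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: es-subpath-demo
  namespace: mail
spec:
  containers:
    - name: es
      image: elasticsearch:7.13.4
      env:
        - name: discovery.type
          value: single-node
      volumeMounts:
        - name: es-conf
          mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
          subPath: elasticsearch.yml   # only this one file is overridden, the rest keep the image defaults
  volumes:
    - name: es-conf
      configMap:
        name: es-conf
EOF
```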
Reference (App Store): https://kubesphere.com.cn/docs/v3.3/pluggable-components/app-store/
Helm documentation: https://helm.sh/zh/
Chart search: https://artifacthub.io/
The built-in App Store carries few applications; for anything else it is easier to install from a Helm chart repository, which works much like Docker Hub for charts. First go to Workspace → App Management → App Repositories and add the bitnami repository (this requires the corresponding role; the repository URL is https://charts.bitnami.com/bitnami). Once it has synced, go to Project → Application Workloads → Apps → Create → From App Template and select the bitnami repository to pick charts from. The plain Helm CLI equivalent is sketched below.
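Outside KubeSphere the same repository can be used directly with the Helm CLI; a sketch (the release name and chart are examples):

```bash
# Add and refresh the bitnami chart repository
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update

# Find a chart and install it into a namespace
helm search repo bitnami/redis
helm install my-redis bitnami/redis -n mail --create-namespace
```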
Project: https://gitee.com/y_project/RuoYi-Cloud
Architecture diagram: https://gitee.com/zhangmrit/ruoyi-cloud/blob/nacos/doc/ruoyi-cloud.png
The project has to run locally before it can be moved to the cluster. Clone the RuoYi-Cloud repository and create the databases locally from its SQL files: create the ry-cloud database yourself and import the quartz and ry scripts into it; the other two scripts create their own databases. Then download the Nacos server and edit conf/application.properties; the following can simply be appended at the end of the file:
```properties
spring.datasource.platform=mysql
db.num=1
db.url.0=jdbc:mysql://127.0.0.1:3306/ry-config?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
db.user.0=root
db.password.0=root
```
Then start Nacos from the bin directory with `startup.cmd -m standalone`, open the console, and in the configuration list change every datasource username/password to your own. Finally start Redis, then start the backend and the frontend.
First create a new project named his, then go to Application Workloads → Services → Create stateful service (this way the DNS name can be customized directly). Deploy MySQL and Redis as described above and import the database files (again: create the ry-cloud database yourself, import the quartz and ry scripts into it, and let the other two scripts create their own databases).
Next deploy the Nacos cluster. The catch is that a Nacos cluster must list the address of every Nacos node, so the deployment has to be stateful so that fixed DNS names can be used in the configuration. To find those names, first create several Nacos replicas as a stateful workload, open the Nacos service to get the overall service DNS name, then open a shell in any Nacos container and ping his-nacos.his. From that you can see the per-pod naming pattern: in my case his-nacos-v1-0.his-nacos.his.svc.cluster.local (pod name + service name + fixed suffix) is Nacos node 1.
Now create the Nacos cluster for real. Two configuration files need to be extracted: application.properties (the content pasted above, with the database IP replaced by the MySQL service DNS name, in my case his-mysql.his) and cluster.conf. Then choose Services → stateful service, name it his-nacos, version v1. cluster.conf lists the three nodes; a ConfigMap sketch follows the file:
```
his-nacos-v1-0.his-nacos.his.svc.cluster.local:8848
his-nacos-v1-1.his-nacos.his.svc.cluster.local:8848
his-nacos-v1-2.his-nacos.his.svc.cluster.local:8848
```
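Those two files end up in a ConfigMap that is mounted into the Nacos pods; a CLI sketch, assuming both files are saved locally and the project is his (the ConfigMap name nacos-conf is an example):

```bash
# One ConfigMap holding both nacos configuration files
kubectl create configmap nacos-conf -n his \
  --from-file=application.properties=./application.properties \
  --from-file=cluster.conf=./cluster.conf
kubectl get configmap nacos-conf -n his -o yaml
```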
Then recreate Nacos with the configuration mounted under /home/nacos/conf/xxx (each file must be mounted as a subPath, otherwise the rest of the directory gets overwritten). Start the three Nacos replicas, check the logs, expose an external Service, and try logging in, e.g. http://192.168.249.146:32735/nacos/. In the Nacos console, change the MySQL and Redis connection IPs in the imported configuration to the DNS names and set the credentials. Cluster mode needs Nacos v2.1.0; you can also run a single node instead (single node: add the environment variable MODE=standalone; cluster: add PREFER_HOST_MODE=hostname; see the nacos image description on Docker Hub, and the plain-Docker sketch after this paragraph).
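For comparison, the same environment variables drive the official nacos/nacos-server image under plain Docker; a single-node sketch (version and port mapping are examples):

```bash
# Single-node Nacos for local testing; MODE=standalone is the switch mentioned above
docker run -d --name nacos-standalone \
  -e MODE=standalone \
  -p 8848:8848 \
  nacos/nacos-server:v2.1.0
```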
The Dockerfile below goes into the root directory of every service module so each can be packaged; the official reference is https://gitee.com/y_project/RuoYi-Cloud/tree/v3.6.1/docker
```dockerfile
# Conventions:
#  - containers listen on port 8080 by default
#  - timezone is CST
#  - the PARAMS environment variable can override any configuration value at runtime
#  - the in-cluster nacos address is his-nacos.his:8848
#  - each microservice loads "<service-name>-<active-profile>.yml" from nacos on startup,
#    so all production configuration can live in nacos
FROM openjdk:8-jdk
LABEL maintainer=shawn

# Log in to nacos and create a "prod" namespace first, then clone the dev configuration
# files into prod and change the -dev suffix to -prod.
# docker run -e PARAMS="--server.port 9090"
ENV PARAMS="--server.port=8080 --spring.profiles.active=prod --spring.cloud.nacos.discovery.server-addr=his-nacos.his:8848 --spring.cloud.nacos.config.server-addr=his-nacos.his:8848 --spring.cloud.nacos.config.namespace=prod --spring.cloud.nacos.config.file-extension=yml"
RUN /bin/cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && echo 'Asia/Shanghai' >/etc/timezone

COPY target/*.jar /app.jar
EXPOSE 8080

ENTRYPOINT ["/bin/sh","-c","java -Dfile.encoding=utf8 -Djava.security.egd=file:/dev/./urandom -jar app.jar ${PARAMS}"]
```
Once the Docker images are built they can be pushed to an image registry; you can also run your own private registry such as Harbor.
```bash
docker login --username=forsum**** registry.cn-hangzhou.aliyuncs.com

# Re-tag the local image so its name follows the Aliyun registry convention
docker tag [ImageId] registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/<image-name>:[tag]
## e.g. docker tag 461955fe1e57 registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-visual-monitor:v1
docker push registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/<image-name>:[tag]
## e.g. docker push registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-visual-monitor:v1

# All ruoyi images
docker pull registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-auth:v2
docker pull registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-file:v2
docker pull registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-gateway:v2
docker pull registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-job:v2
docker pull registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-system:v2
docker pull registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-visual-monitor:v2
docker pull registry.cn-hangzhou.aliyuncs.com/lfy_ruoyi/ruoyi-ui:v2
```
Now deploy the microservice layer itself. Project repositories:
https://gitee.com/leifengyang/yygh-parent
https://gitee.com/leifengyang/yygh-admin
https://gitee.com/leifengyang/yygh-site
First enable the DevOps component in KubeSphere. In the existing his project add the remaining middleware: for Sentinel you can use the leifengyang/sentinel:1.8.2 image, MongoDB and RabbitMQ can be created from app templates, and Elasticsearch is created the same way as before.
| Middleware | In-cluster address | External address |
| --- | --- | --- |
| Nacos | his-nacos.his:8848 | http://192.168.249.146:30397/nacos |
| MySQL | his-mysql.his:3306 | 192.168.249.146:31840 |
| Redis | his-redis.his:6379 | 192.168.249.146:31968 |
| Sentinel | his-sentinel.his:8080 | http://192.168.249.146:31229 |
| MongoDB | mongodb.his:27017 | 192.168.249.146:30640 |
| RabbitMQ | rabbitm-rabbitmq.his:5672 | 192.168.249.146:31018 |
| ElasticSearch | his-es.his:9200 | 192.168.249.146:31300 |
Then extract each service's configuration so that the production environment is kept separate; following the default convention in the Dockerfile above, each service loads its <service-name>-prod.yml from the prod namespace in nacos.
Jenkins official site; for building and deploying a Maven project see: https://kubesphere.io/zh/docs/v3.3/devops-user-guide/examples/a-maven-project/
First create a DevOps project in the workspace (if it stays in the waiting state, try restarting Docker or changing the account password). Open it, create a pipeline, and edit it; you can start from one of the built-in CI/CD templates to get the visual editor, adjust the stages, and finally export the Jenkinsfile. KubeSphere ships built-in agent containers; see the Jenkins agent documentation and work through the stages step by step.
Step 1: pull the code. Configure the repository URL and credentials, and print the workspace contents to verify.
Step 2: compile the project. For a Maven build run `mvn clean package -Dmaven.test.skip=true` (if a dialog pops up asking for input, enter anything to continue). You can also configure Maven acceleration so that jars downloaded once are not downloaded again on the next pipeline run; a sketch follows.
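One way to speed Maven up is a nearby mirror plus a persistent local repository; a hedged sketch of such a settings.xml (in KubeSphere the Maven agent's settings are usually managed through a ConfigMap, so check the DevOps docs for the exact object and treat this as illustrative):

```bash
# Example settings.xml with an Aliyun mirror; keep localRepository on a persistent path
# so jars downloaded once are reused by later pipeline runs
cat <<'EOF' > settings.xml
<settings>
  <localRepository>/root/.m2/repository</localRepository>
  <mirrors>
    <mirror>
      <id>aliyun</id>
      <mirrorOf>central</mirrorOf>
      <url>https://maven.aliyun.com/repository/public</url>
    </mirror>
  </mirrors>
</settings>
EOF

# Use it explicitly when building
mvn -s settings.xml clean package -Dmaven.test.skip=true
```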
Step 3: build the Docker images, one image per jar.
Step 4: push the newly built images to the image registry. Change the registry to your own; environment variables can be declared in the Jenkinsfile and substituted in the push step.
Step 5: deploy the images. For this step create the kubeconfig cluster credential and the Aliyun registry credential in advance, put a deploy.yml in every module, and define all the variables it uses in the Jenkinsfile (a manual approval gate can be added before deploying to production).
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: hospital-manage
  name: hospital-manage
  namespace: his   # the namespace must be set
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  selector:
    matchLabels:
      app: hospital-manage
  strategy:
    rollingUpdate:
      maxSurge: 50%
      maxUnavailable: 50%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: hospital-manage
    spec:
      imagePullSecrets:
        - name: aliyun-docker-hub   # registry credentials created in the project beforehand
      containers:
        - image: $REGISTRY/$ALIYUNHUB_NAMESPACE/hospital-manage:SNAPSHOT-$BUILD_NUMBER
          # readinessProbe:
          #   httpGet:
          #     path: /actuator/health
          #     port: 8080
          #   timeoutSeconds: 10
          #   failureThreshold: 30
          #   periodSeconds: 5
          imagePullPolicy: Always
          name: app
          ports:
            - containerPort: 8080
              protocol: TCP
          resources:
            limits:
              cpu: 300m
              memory: 600Mi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hospital-manage
  name: hospital-manage
  namespace: his
spec:
  ports:
    - name: http
      port: 8080
      protocol: TCP
      targetPort: 8080
  selector:
    app: hospital-manage
  sessionAffinity: None
  type: ClusterIP
```
An example Jenkinsfile; several of the stages can be built in parallel. Fill in your own values:
```groovy
pipeline {
  agent {
    node {
      label 'maven'
    }
  }
  stages {
    stage('pull code') {
      agent none
      steps {
        container('maven') {
          git(url: 'https://gitee.com/LXT2017/yygh-parent', credentialsId: 'gitee', branch: 'master', changelog: true, poll: false)
          sh 'ls -la'
        }
      }
    }
    stage('build project') {
      agent none
      steps {
        container('maven') {
          sh 'ls'
          sh 'mvn clean package -Dmaven.test.skip=true'
        }
        sh 'ls server-gateway/target'
      }
    }
    stage('default-2') {
      parallel {
        stage('build hospital-manage image') {
          steps {
            container('maven') {
              sh 'ls hospital-manage/target'
              sh 'docker build -t hospital-manage:latest -f hospital-manage/Dockerfile ./hospital-manage/'
            }
          }
        }
      }
    }
    stage('default-3') {
      parallel {
        stage('push hospital-manage image') {
          agent none
          steps {
            container('maven') {
              withCredentials([usernamePassword(credentialsId: 'aliyun-docker', passwordVariable: 'DOCKER_PWD_VAR', usernameVariable: 'DOCKER_USER_VAR')]) {
                sh 'echo "$DOCKER_PWD_VAR" | docker login $REGISTRY -u "$DOCKER_USER_VAR" --password-stdin'
                sh 'docker tag hospital-manage:latest $REGISTRY/$DOCKERHUB_NAMESPACE/hospital-manage:SNAPSHOT-$BUILD_NUMBER'
                sh 'docker push $REGISTRY/$DOCKERHUB_NAMESPACE/hospital-manage:SNAPSHOT-$BUILD_NUMBER'
              }
            }
          }
        }
      }
    }
    stage('default-4') {
      parallel {
        stage('hospital-manage - deploy to dev') {
          steps {
            container('maven') {
              withCredentials([kubeconfigFile(credentialsId: env.KUBECONFIG_CREDENTIAL_ID, variable: 'KUBECONFIG')]) {
                sh 'envsubst < hospital-manage/deploy/deploy.yml | kubectl apply -f -'
              }
            }
          }
        }
      }
    }
  }
  environment {
    DOCKER_CREDENTIAL_ID = 'dockerhub-id'
    GITHUB_CREDENTIAL_ID = 'github-id'
    KUBECONFIG_CREDENTIAL_ID = 'demo-kubeconfig'
    REGISTRY = 'registry.cn-hangzhou.aliyuncs.com'
    DOCKERHUB_NAMESPACE = 'llxxtt'
    GITHUB_ACCOUNT = 'kubesphere'
    APP_NAME = 'devops-java-sample'
    ALIYUNHUB_NAMESPACE = 'llxxtt'
  }
  parameters {
    string(name: 'TAG_NAME', defaultValue: '', description: '')
  }
}
```
As a final stage you can add email notification. KubeSphere's global notification settings (alerts and monitoring events) are configured under Platform Settings, while the pipeline's own email goes through the Jenkins email server; see "Set Up an Email Server for KubeSphere Pipelines".
The other modules are handled in the same way.