[Cloud Native | Kubernetes Series] Dubbo Architecture: Deploying Dubbo Services on k8s

Deploying Dubbo services on k8s

1. Building the provider image

Dockerfile

#Dubbo provider
FROM harbor.intra.com/pub-images/jdk-base:v8.212 

RUN yum install file nc -y
RUN mkdir -p /apps/dubbo/provider
ADD dubbo-demo-provider-2.1.5/  /apps/dubbo/provider
ADD run_java.sh /apps/dubbo/provider/bin 
RUN chown -R nginx:nginx /apps
RUN chmod a+x /apps/dubbo/provider/bin/*.sh

CMD ["/apps/dubbo/provider/bin/run_java.sh"]
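
run_java.sh itself is not listed in this article; a minimal sketch of what it has to do, assuming the stock start.sh that ships with dubbo-demo-provider, is to launch the service as the nginx user and then keep PID 1 alive in the foreground (the hosts-file dump in the Pod log later in this article is consistent with a tail -f /etc/hosts keep-alive):

#!/bin/bash
# Hypothetical sketch of run_java.sh -- the real script is not shown in this article.
su - nginx -c "/apps/dubbo/provider/bin/start.sh"   # start the provider as the nginx user
tail -f /etc/hosts                                  # keep the container's PID 1 running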

Build script

#!/bin/bash
docker build -t harbor.intra.com/wework/dubbo-demo-provider:v1  .
sleep 3
docker push harbor.intra.com/wework/dubbo-demo-provider:v1

Modify the configuration file
First, confirm that the ZooKeeper Service names resolve and are reachable by pinging them from a Pod:

[root@wordpress-app-deployment-67bc78cf9-cn5cf /]# ping zookeeper1.wework.svc.magedu.local
PING zookeeper1.wework.svc.magedu.local (10.200.167.230) 56(84) bytes of data.
64 bytes from zookeeper1.wework.svc.magedu.local (10.200.167.230): icmp_seq=1 ttl=64 time=0.086 ms
[root@wordpress-app-deployment-67bc78cf9-cn5cf /]# ping zookeeper2.wework.svc.magedu.local -c 1
PING zookeeper2.wework.svc.magedu.local (10.200.36.129) 56(84) bytes of data.
64 bytes from zookeeper2.wework.svc.magedu.local (10.200.36.129): icmp_seq=1 ttl=64 time=0.065 ms
[root@wordpress-app-deployment-67bc78cf9-cn5cf /]# ping zookeeper3.wework.svc.magedu.local -c 1
PING zookeeper3.wework.svc.magedu.local (10.200.190.129) 56(84) bytes of data.
64 bytes from zookeeper3.wework.svc.magedu.local (10.200.190.129): icmp_seq=1 ttl=64 time=0.064 ms
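
ping only proves DNS resolution and routing. To confirm the ZooKeeper client port itself is serving, you can send the ruok four-letter-word probe with nc (installed in the image above); a healthy node replies imok. This is a sketch assuming four-letter words are enabled, which is the default in ZooKeeper 3.4:

for i in 1 2 3; do
  echo ruok | nc zookeeper${i}.wework.svc.magedu.local 2181
  echo    # ruok's reply carries no trailing newline
done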

Edit dubbo-demo-provider-2.1.5/conf/dubbo.properties:

dubbo.container=log4j,spring
dubbo.application.name=demo-provider
dubbo.application.owner=
#dubbo.registry.address=multicast://224.5.6.7:1234
dubbo.registry.address=zookeeper://zookeeper1.wework.svc.magedu.local:2181 | zookeeper://zookeeper2.wework.svc.magedu.local:2181 | zookeeper://zookeeper3.wework.svc.magedu.local:2181
#dubbo.registry.address=redis://127.0.0.1:6379
#dubbo.registry.address=dubbo://127.0.0.1:9090
dubbo.monitor.protocol=registry
dubbo.protocol.name=dubbo
dubbo.protocol.port=20880
dubbo.log4j.file=logs/dubbo-demo-provider.log
dubbo.log4j.level=WARN
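
A note on the registry address: Dubbo treats | (or ;) as a separator between independent registries, so the line above registers with three separate registries that happen to be nodes of one ensemble. Since the three nodes form a single ZooKeeper cluster, the single-registry form with a backup parameter is an alternative worth knowing:

# Single-ensemble form: one registry, the other nodes listed as backups
dubbo.registry.address=zookeeper://zookeeper1.wework.svc.magedu.local:2181?backup=zookeeper2.wework.svc.magedu.local:2181,zookeeper3.wework.svc.magedu.local:2181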

Build the image

root@k8s-master-01:/opt/k8s-data/dockerfile/web/wework/dubbo/provider# ./build-command.sh 
Successfully built 9fc1b5f5d563
Successfully tagged harbor.intra.com/wework/dubbo-demo-provider:v1
The push refers to repository [harbor.intra.com/wework/dubbo-demo-provider]
0ee8c7ff4613: Pushed 
ce6d48934134: Pushed 
6e95248e3e50: Pushed 
0fdc61076c5e: Pushed 
c9d9ddb0c6ce: Pushed 
2f69d34236bc: Pushed 
aadaa9679cb8: Mounted from wework/jenkins 
fc305a4ba468: Mounted from wework/jenkins 
ab93afc6a659: Mounted from wework/jenkins 
d7f831641e18: Mounted from wework/jenkins 
f4b52134c525: Mounted from wework/jenkins 
0533300cca03: Mounted from wework/jenkins 
30a12549c4a3: Mounted from wework/jenkins 
ce1fb445c72c: Mounted from wework/jenkins 
174f56854903: Mounted from wework/wordpress-php-5.6 
v1: digest: sha256:d2b539a3a2ef6b1bd7b3d56e9167959671d06c7cbb28196af8ffee889ce292ef size: 3467
## Test
root@k8s-master-01:/opt/k8s-data/dockerfile/web/wework/dubbo/provider# docker run -it harbor.intra.com/wework/dubbo-demo-provider:v1 bash
[root@546bc122aa8d /]# ll -lh /apps/dubbo/provider/bin/*.sh
-rwxr-xr-x 1 nginx nginx 2.2K Apr 13  2012 /apps/dubbo/provider/bin/dump.sh

2. Deploying the provider on k8s

provider.yaml

kind: Deployment
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
metadata:
  labels:
    app: wework-provider
  name: wework-provider-deployment
  namespace: wework
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wework-provider
  template:
    metadata:
      labels:
        app: wework-provider
    spec:
      containers:
      - name: wework-provider-container
        image: harbor.intra.com/wework/dubbo-demo-provider:v1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 20880
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: wework-provider
  name: wework-provider-spec
  namespace: wework
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 20880
    #nodePort: 30001
  selector:
    app: wework-provider
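
Note that Dubbo consumers discover providers through ZooKeeper and then connect straight to the registered Pod IP on port 20880, so the NodePort Service above is not on the RPC path; it mainly helps with debugging from outside the cluster. After the apply in the next step, you can still check which Pods back it:

kubectl get endpoints -n wework wework-provider-spec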

Deploy the provider Deployment

root@k8s-master-01:/opt/k8s-data/yaml/web/wework/dubbo/provider# kubectl apply -f provider.yaml
deployment.apps/wework-provider-deployment created
service/wework-provider-spec created
root@k8s-master-01:/opt/k8s-data/yaml/web/wework/dubbo/provider# kubectl get pods -n wework
NAME                                         READY   STATUS    RESTARTS   AGE
mysql-0                                      2/2     Running   0          7h5m
mysql-1                                      2/2     Running   0          7h5m
mysql-2                                      2/2     Running   0          7h5m
mysql-3                                      2/2     Running   0          7h5m
wework-jenkins-deployment-5697fd66cf-mw8dl   1/1     Running   0          6h10m
wework-provider-deployment-d5d886998-n2xdn   1/1     Running   0          61s
wordpress-app-deployment-67bc78cf9-cn5cf     2/2     Running   0          4h18m
zookeeper1-699d46468c-62nfk                  1/1     Running   0          19h
zookeeper2-7cc484778-fl594                   1/1     Running   0          19h
zookeeper3-cdf484f7c-bb9fr                   1/1     Running   0          19h
root@k8s-master-01:/opt/k8s-data/yaml/web/wework/dubbo/provider# kubectl get svc -n wework
NAME                         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                        AGE
mysql                        ClusterIP   None             <none>        3306/TCP                                       24h
mysql-read                   ClusterIP   10.200.195.5     <none>        3306/TCP                                       24h
wework-jenkins-service       NodePort    10.200.71.97     <none>        80:38080/TCP                                   6h48m
wework-nginx-service         NodePort    10.200.89.252    <none>        80:30090/TCP,443:30091/TCP                     4d2h
wework-provider-spec         NodePort    10.200.58.29     <none>        80:50515/TCP                                   89s
wework-tomcat-app1-service   ClusterIP   10.200.21.158    <none>        80/TCP                                         3d7h
wordpress-app-spec           NodePort    10.200.171.213   <none>        80:30031/TCP,443:30033/TCP                     4h18m
zookeeper                    ClusterIP   10.200.117.19    <none>        2181/TCP                                       2d5h
zookeeper1                   NodePort    10.200.167.230   <none>        2181:32181/TCP,2888:31774/TCP,3888:56670/TCP   2d5h
zookeeper2                   NodePort    10.200.36.129    <none>        2181:32182/TCP,2888:46321/TCP,3888:30984/TCP   2d5h
zookeeper3                   NodePort    10.200.190.129   <none>        2181:32183/TCP,2888:61447/TCP,3888:51393/TCP   2d5h

## Check that the container is running normally
root@k8s-master-01:/opt/k8s-data/yaml/web/wework/dubbo/provider# kubectl logs wework-provider-deployment-d5d886998-n2xdn -n wework
Ncat: Idle timeout expired (1000 ms).
Ncat: Idle timeout expired (1000 ms).
Ncat: Idle timeout expired (1000 ms).
Ncat: Idle timeout expired (1000 ms).
Ncat: Idle timeout expired (1000 ms).
Ncat: Idle timeout expired (1000 ms).
Starting the demo-provider .......................................................OK!
PID: 58
STDOUT: logs/stdout.log
# Kubernetes-managed hosts file.
127.0.0.1	localhost
::1	localhost ip6-localhost ip6-loopback
fe00::0	ip6-localnet
fe00::0	ip6-mcastprefix
fe00::1	ip6-allnodes
fe00::2	ip6-allrouters
172.100.140.99	wework-provider-deployment-d5d886998-n2xdn
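
To confirm the provider actually registered, you can list its znode from one of the ZooKeeper Pods. A sketch, assuming zkCli.sh lives under /zookeeper/bin in the image (adjust the path to your ZooKeeper image):

kubectl exec -it -n wework zookeeper1-699d46468c-62nfk -- \
    /zookeeper/bin/zkCli.sh ls /dubbo/com.alibaba.dubbo.demo.DemoService/providers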

3. Building the consumer image

Dockerfile

#Dubbo consumer
FROM harbor.intra.com/pub-images/jdk-base:v8.212 

RUN yum install file -y
RUN mkdir -p /apps/dubbo/consumer 
ADD dubbo-demo-consumer-2.1.5  /apps/dubbo/consumer
ADD run_java.sh /apps/dubbo/consumer/bin 
RUN chown -R nginx:nginx /apps
RUN chmod a+x /apps/dubbo/consumer/bin/*.sh

CMD ["/apps/dubbo/consumer/bin/run_java.sh"]

Build script

#!/bin/bash
docker build -t harbor.intra.com/wework/dubbo-demo-consumer:v1  .
sleep 3
docker push harbor.intra.com/wework/dubbo-demo-consumer:v1

Dubbo configuration file (dubbo-demo-consumer-2.1.5/conf/dubbo.properties)

dubbo.container=log4j,spring
dubbo.application.name=demo-consumer
dubbo.application.owner=
#dubbo.registry.address=multicast://224.5.6.7:1234
dubbo.registry.address=zookeeper://zookeeper1.wework.svc.magedu.local:2181 | zookeeper://zookeeper2.wework.svc.magedu.local:2181 | zookeeper://zookeeper3.wework.svc.magedu.local:2181
#dubbo.registry.address=redis://127.0.0.1:6379
#dubbo.registry.address=dubbo://127.0.0.1:9090
dubbo.monitor.protocol=registry
dubbo.log4j.file=logs/dubbo-demo-consumer.log
dubbo.log4j.level=WARN

Build the image

root@k8s-master-01:/opt/k8s-data/dockerfile/web/wework/dubbo/consumer# ./build-command.sh 
Sending build context to Docker daemon  21.84MB
ce1fb445c72c: Mounted from wework/dubbo-demo-provider 
174f56854903: Mounted from wework/dubbo-demo-provider 
v1: digest: sha256:2ae544fea359abd06f3b79e6818bb29b5583ffdcac8762f9969893279063466e size: 3467
root@k8s-master-01:/opt/k8s-data/dockerfile/web/wework/dubbo/consumer# docker images
REPOSITORY                                            TAG             IMAGE ID       CREATED          SIZE
harbor.intra.com/wework/dubbo-demo-consumer           v1              8da9abdb490a   33 seconds ago   1.71GB

4. Deploying the consumer on k8s

consumer.yaml

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    app: wework-consumer
  name: wework-consumer-deployment
  namespace: wework
spec:
  replicas: 1
  selector:
    matchLabels:
      app: wework-consumer
  template:
    metadata:
      labels:
        app: wework-consumer
    spec:
      containers:
      - name: wework-consumer-container
        image: harbor.intra.com/wework/dubbo-demo-consumer:v1 
        #command: ["/apps/tomcat/bin/run_tomcat.sh"]
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always
        ports:
        - containerPort: 80
          protocol: TCP
          name: http

---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: wework-consumer
  name: wework-consumer-server
  namespace: wework
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
    #nodePort: 30001
  selector:
    app: wework-consumer

Deploy the consumer Deployment

root@k8s-master-01:/opt/k8s-data/yaml/web/wework/dubbo/consumer# kubectl apply -f consumer.yaml 
deployment.apps/wework-consumer-deployment created
service/wework-consumer-server created
root@k8s-master-01:/opt/k8s-data/yaml/web/wework/dubbo/consumer# kubectl get pods -n wework
NAME                                          READY   STATUS    RESTARTS   AGE
mysql-0                                       2/2     Running   0          7h50m
mysql-1                                       2/2     Running   0          7h49m
mysql-2                                       2/2     Running   0          7h49m
mysql-3                                       2/2     Running   0          7h49m
wework-consumer-deployment-5ffd465f85-gbk49   1/1     Running   0          8s
wework-jenkins-deployment-5697fd66cf-mw8dl    1/1     Running   0          6h54m
wework-provider-deployment-d5d886998-n2xdn    1/1     Running   0          45m
wordpress-app-deployment-67bc78cf9-cn5cf      2/2     Running   0          5h2m
zookeeper1-699d46468c-62nfk                   1/1     Running   0          20h
zookeeper2-7cc484778-fl594                    1/1     Running   0          20h
zookeeper3-cdf484f7c-bb9fr                    1/1     Running   0          20h
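
The consumer registers itself under the consumers znode of the same service, so the same zkCli.sh check used for the provider (same path assumption) verifies it:

kubectl exec -it -n wework zookeeper1-699d46468c-62nfk -- \
    /zookeeper/bin/zkCli.sh ls /dubbo/com.alibaba.dubbo.demo.DemoService/consumers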

Monitor /apps/dubbo/provider/logs/*.log inside the provider Pod; the consumer has connected.
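
For example (Pod name taken from the listing above; the glob needs a shell):

kubectl exec -it -n wework wework-provider-deployment-d5d886998-n2xdn -- \
    sh -c 'tail -f /apps/dubbo/provider/logs/*.log'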


5. Scaling the number of Pods

Scale up the provider replica count:

root@k8s-master-01:/opt/k8s-data/yaml/wework/zookeeper# kubectl scale --replicas=2 -n wework deployment wework-provider-deployment
deployment.apps/wework-provider-deployment scaled

root@k8s-master-01:/opt/k8s-data/yaml/wework/zookeeper# kubectl get pods -n wework 
NAME                                          READY   STATUS    RESTARTS   AGE
mysql-0                                       2/2     Running   0          8h
mysql-1                                       2/2     Running   0          8h
mysql-2                                       2/2     Running   0          8h
mysql-3                                       2/2     Running   0          8h
wework-consumer-deployment-5ffd465f85-gbk49   1/1     Running   0          19m
wework-jenkins-deployment-5697fd66cf-mw8dl    1/1     Running   0          7h13m
wework-provider-deployment-d5d886998-2zhdx    1/1     Running   0          41s
wework-provider-deployment-d5d886998-n2xdn    1/1     Running   0          64m
wordpress-app-deployment-67bc78cf9-cn5cf      2/2     Running   0          5h21m
zookeeper1-699d46468c-62nfk                   1/1     Running   0          20h
zookeeper2-7cc484778-fl594                    1/1     Running   0          20h
zookeeper3-cdf484f7c-bb9fr                    1/1     Running   0          20h

The consumer now shows responses from both providers, load-balanced round-robin:

[root@wework-consumer-deployment-5ffd465f85-gbk49 /]# cd /a
anaconda-post.log  apps/              
[root@wework-consumer-deployment-5ffd465f85-gbk49 /]# cd /apps/dubbo/consumer/logs/
[root@wework-consumer-deployment-5ffd465f85-gbk49 logs]# tail -f *.log
==> dubbo-demo-consumer.log <==
        at com.alibaba.dubbo.common.bytecode.proxy1.count(proxy1.java)
        at com.alibaba.dubbo.monitor.dubbo.DubboMonitor.send(DubboMonitor.java:112)
        at com.alibaba.dubbo.monitor.dubbo.DubboMonitor$1.run(DubboMonitor.java:69)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
        at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
        at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)

==> stdout.log <==
[16:54:39] Hello world581, response form provider: 172.100.76.168:20880
[16:54:41] Hello world582, response form provider: 172.100.140.99:20880
[16:54:43] Hello world583, response form provider: 172.100.76.168:20880
[16:54:45] Hello world584, response form provider: 172.100.140.99:20880
[16:54:47] Hello world585, response form provider: 172.100.76.168:20880
[16:54:49] Hello world586, response form provider: 172.100.140.99:20880
[16:54:51] Hello world587, response form provider: 172.100.76.168:20880
[16:54:53] Hello world588, response form provider: 172.100.140.99:20880
[16:54:55] Hello world589, response form provider: 172.100.76.168:20880
[16:54:57] Hello world590, response form provider: 172.100.140.99:20880
[16:54:59] Hello world591, response form provider: 172.100.76.168:20880
[16:55:01] Hello world592, response form provider: 172.100.140.99:20880

When a provider is scaled down, the registry sees the change through its watch and notifies the consumer, which then stops calling that provider:
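
This can also be observed from a ZooKeeper CLI session. With the 3.4-style client, appending true to ls sets a one-shot watch on the znode, and the scale-down below fires a NodeChildrenChanged event (a sketch, under the same zkCli.sh path assumption as earlier):

# inside zkCli.sh (3.4 syntax: ls <path> <watch>)
ls /dubbo/com.alibaba.dubbo.demo.DemoService/providers true
# after the kubectl scale below, the watch fires with an event like:
# WatchedEvent state:SyncConnected type:NodeChildrenChanged path:/dubbo/com.alibaba.dubbo.demo.DemoService/providers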

root@k8s-master-01:/opt/k8s-data/yaml/wework/zookeeper# kubectl scale --replicas=1 -n wework deployment wework-provider-deployment
deployment.apps/wework-provider-deployment scaled
root@k8s-master-01:/opt/k8s-data/yaml/wework/zookeeper# kubectl get pods -n wework 
NAME                                          READY   STATUS        RESTARTS   AGE
mysql-0                                       2/2     Running       0          8h
mysql-1                                       2/2     Running       0          8h
mysql-2                                       2/2     Running       0          8h
mysql-3                                       2/2     Running       0          8h
wework-consumer-deployment-5ffd465f85-gbk49   1/1     Running       0          22m
wework-jenkins-deployment-5697fd66cf-mw8dl    1/1     Running       0          7h17m
wework-provider-deployment-d5d886998-2zhdx    1/1     Terminating   0          4m15s
wework-provider-deployment-d5d886998-n2xdn    1/1     Running       0          68m
wordpress-app-deployment-67bc78cf9-cn5cf      2/2     Running       0          5h25m
zookeeper1-699d46468c-62nfk                   1/1     Running       0          20h
zookeeper2-7cc484778-fl594                    1/1     Running       0          20h
zookeeper3-cdf484f7c-bb9fr                    1/1     Running       0          20h
root@k8s-master-01:/opt/k8s-data/yaml/wework/zookeeper# kubectl get pods -n wework 
NAME                                          READY   STATUS    RESTARTS   AGE
mysql-0                                       2/2     Running   0          8h
mysql-1                                       2/2     Running   0          8h
mysql-2                                       2/2     Running   0          8h
mysql-3                                       2/2     Running   0          8h
wework-consumer-deployment-5ffd465f85-gbk49   1/1     Running   0          22m
wework-jenkins-deployment-5697fd66cf-mw8dl    1/1     Running   0          7h17m
wework-provider-deployment-d5d886998-n2xdn    1/1     Running   0          68m
wordpress-app-deployment-67bc78cf9-cn5cf      2/2     Running   0          5h25m
zookeeper1-699d46468c-62nfk                   1/1     Running   0          20h
zookeeper2-7cc484778-fl594                    1/1     Running   0          20h
zookeeper3-cdf484f7c-bb9fr                    1/1     Running   0          20h
In the consumer log, reconnect attempts to the now-removed provider (172.100.76.168) keep failing, while stdout.log only shows responses from the remaining provider:

Caused by: java.net.ConnectException: connection timed out
        at org.jboss.netty.channel.socket.nio.NioClientSocketPipelineSink$Boss.processConnectTimeout(NioClientSocketPipelineSink.java:371)
        at org.jboss.netty.channel.socket.nio.NioClientSocketPipelineSink$Boss.run(NioClientSocketPipelineSink.java:283)
        ... 3 more
2022-08-12 16:57:53,127 [DubboClientReconnectTimer-thread-2] WARN  com.alibaba.dubbo.remoting.transport.AbstractClient$1 (AbstractClient.java:158) -  [DUBBO] client reconnect to 172.100.76.168:20880 find error . url: dubbo://172.100.76.168:20880/com.alibaba.dubbo.demo.DemoService?anyhost=true&application=demo-consumer&check=false&codec=dubbo&dubbo=2.1.5&heartbeat=60000&interface=com.alibaba.dubbo.demo.DemoService&loadbalance=roundrobin&methods=sayHello&monitor=dubbo%3A%2F%2Fzookeeper1.wework.svc.magedu.local%3A2181%2Fcom.alibaba.dubbo.registry.RegistryService%3Fapplication%3Ddemo-consumer%26dubbo%3D2.1.5%26pid%3D54%26protocol%3Dregistry%26refer%3Ddubbo%253D2.1.5%2526interface%253Dcom.alibaba.dubbo.monitor.MonitorService%2526pid%253D54%2526timestamp%253D1660293315204%26registry%3Dzookeeper%26timestamp%3D1660293315195&pid=54&revision=2.1.5&timestamp=1660293315151, dubbo version: 2.1.5, current host: 172.100.109.110
com.alibaba.dubbo.remoting.RemotingException: client(url: dubbo://172.100.76.168:20880/com.alibaba.dubbo.demo.DemoService?anyhost=true&application=demo-consumer&check=false&codec=dubbo&dubbo=2.1.5&heartbeat=60000&interface=com.alibaba.dubbo.demo.DemoService&loadbalance=roundrobin&methods=sayHello&monitor=dubbo%3A%2F%2Fzookeeper1.wework.svc.magedu.local%3A2181%2Fcom.alibaba.dubbo.registry.RegistryService%3Fapplication%3Ddemo-consumer%26dubbo%3D2.1.5%26pid%3D54%26protocol%3Dregistry%26refer%3Ddubbo%253D2.1.5%2526interface%253Dcom.alibaba.dubbo.monitor.MonitorService%2526pid%253D54%2526timestamp%253D1660293315204%26registry%3Dzookeeper%26timestamp%3D1660293315195&pid=54&revision=2.1.5&timestamp=1660293315151) failed to connect to server /172.100.76.168:20880, error message is:connection timed out
        at com.alibaba.dubbo.remoting.transport.netty.NettyClient.doConnect(NettyClient.java:123)
        at com.alibaba.dubbo.remoting.transport.AbstractClient.connect(AbstractClient.java:278)
        at com.alibaba.dubbo.remoting.transport.AbstractClient.access$000(AbstractClient.java:50)
        at com.alibaba.dubbo.remoting.transport.AbstractClient$1.run(AbstractClient.java:143)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
        at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
        at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
Caused by: java.net.ConnectException: connection timed out
        at org.jboss.netty.channel.socket.nio.NioClientSocketPipelineSink$Boss.processConnectTimeout(NioClientSocketPipelineSink.java:371)
        at org.jboss.netty.channel.socket.nio.NioClientSocketPipelineSink$Boss.run(NioClientSocketPipelineSink.java:283)
        at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
        at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:44)
        ... 3 more

==> stdout.log <==
[16:57:54] Hello world678, response form provider: 172.100.140.99:20880
[16:57:56] Hello world679, response form provider: 172.100.140.99:20880
[16:57:58] Hello world680, response form provider: 172.100.140.99:20880
[16:58:00] Hello world681, response form provider: 172.100.140.99:20880
[16:58:02] Hello world682, response form provider: 172.100.140.99:20880
[16:58:04] Hello world683, response form provider: 172.100.140.99:20880