
Building a Containerized Microservices Project on K8s

2021-12-23  小李飞刀_lql

Containerized Microservices Project

Getting Familiar with the Spring Cloud Microservices Project


Compiling and Building the Source Code

# Install JDK and Maven
[root@prometheus simple-microservice-dev3]# yum install java-1.8.0-openjdk maven

# Update the database connection URLs for the product, stock, and order services
url: jdbc:mysql://192.168.153.27:3306/tb_order?characterEncoding=utf-8
url: jdbc:mysql://192.168.153.27:3306/tb_product?characterEncoding=utf-8
url: jdbc:mysql://192.168.153.27:3306/tb_stock?characterEncoding=utf-8

# Compile and build (skip tests)
[root@prometheus simple-microservice-dev3]# mvn clean package -Dmaven.test.skip=true
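
After the build, each module should have produced an executable jar under its own target/ directory, which the per-service Dockerfiles package in the next step; a quick way to list them (the path pattern is illustrative):

# List the jars produced by the multi-module build
[root@prometheus simple-microservice-dev3]# find . -path "*/target/*.jar"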

Building the Project Images and Pushing Them to the Registry

Log in to Harbor

[root@prometheus harbor]# docker login 192.168.153.20

eureka

[root@prometheus eureka-service]# docker build -t 192.168.153.20/ms/eureka:v1 .
[root@prometheus eureka-service]# docker push 192.168.153.20/ms/eureka:v1
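
Each service ships its own Dockerfile in the repository, which the article does not reproduce; a minimal sketch of what an eureka-service Dockerfile typically looks like is shown below. The base image, jar path, and port are assumptions, not the project's actual file:

# Hypothetical Dockerfile sketch for eureka-service (illustrative, not the project's file)
FROM java:8-jdk-alpine
COPY ./target/eureka-service.jar /eureka-service.jar
EXPOSE 8888
CMD ["java", "-jar", "/eureka-service.jar"]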

Deploying the Eureka Cluster in K8s

Install the Ingress Controller

[root@k8s-m1 k8s]# kubectl apply -f ingress-controller.yaml 
[root@k8s-m1 k8s]#  kubectl get pods -n ingress-nginx -o wide
NAME                                       READY   STATUS   IP               NODE     
nginx-ingress-controller-5dc64b58f-stb5j   1/1     Running  192.168.153.25   k8s-m1   

Create the registry-pull-secret

[root@k8s-m1 k8s]# kubectl create secret docker-registry registry-pull-secret --docker-username=admin --docker-password=Harbor12345 --docker-server=192.168.153.20 
secret/registry-pull-secret created

Deploy the Eureka cluster

[root@k8s-m1 k8s]# kubectl apply -f eureka.yaml 
[root@k8s-m1 k8s]# kubectl get pod,svc -n ms
NAME           READY   STATUS    RESTARTS   AGE
pod/eureka-0   1/1     Running   0          6m55s
pod/eureka-1   1/1     Running   0          2m43s
pod/eureka-2   1/1     Running   0          95s

NAME             TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
service/eureka   ClusterIP   None         <none>        8888/TCP   6m55s
---------------------------------------------------------------------------------
# hosts entry on the client machine:
192.168.153.27 eureka.ctnrs.com
# Access:
http://eureka.ctnrs.com/
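
eureka.yaml is not shown in full in the article; a trimmed sketch consistent with the output above (a headless Service on port 8888 plus a 3-replica StatefulSet, injecting MY_POD_NAME for peer discovery) could look like the following. Everything not visible in the output above is an assumption:

# Trimmed, illustrative sketch of eureka.yaml
apiVersion: v1
kind: Service
metadata:
  name: eureka
  namespace: ms
spec:
  clusterIP: None              # headless Service: gives eureka-0.eureka.ms style DNS names
  ports:
  - port: 8888
  selector:
    app: eureka
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: eureka
  namespace: ms
spec:
  serviceName: eureka
  replicas: 3
  selector:
    matchLabels:
      app: eureka
  template:
    metadata:
      labels:
        app: eureka
    spec:
      imagePullSecrets:
      - name: registry-pull-secret
      containers:
      - name: eureka
        image: 192.168.153.20/ms/eureka:v1
        ports:
        - containerPort: 8888
        env:
        - name: MY_POD_NAME              # later used as -Deureka.instance.hostname
          valueFrom:
            fieldRef:
              fieldPath: metadata.name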

Deploying MySQL in K8s

Deploy MySQL

# Pull the MySQL image (:5.7 means version 5.7)
docker pull mysql:5.7
# Run the MySQL container
docker run -d -p 3306:3306 --privileged=true -v /docker/mysql/conf/my.cnf:/etc/my.cnf -v /docker/mysql/data:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=123456 --name mysql mysql:5.7 --character-set-server=utf8mb4 --collation-server=utf8mb4_general_ci
# Parameter notes:
run                 run a container
-d                  run in the background
-p                  map a host port to the container port
--privileged=true   give the container elevated privileges (otherwise the root user cannot log in from outside in this setup)
-v /docker/mysql/conf/my.cnf:/etc/my.cnf   mount the host's my.cnf into the container as the MySQL config
-v /docker/mysql/data:/var/lib/mysql       likewise, mount the data directory so data survives deleting and re-creating the container
-e MYSQL_ROOT_PASSWORD=123456              set the password of the MySQL root user
--name mysql        name the container "mysql"
mysql:5.7           start the container from the mysql:5.7 image
--character-set-server=utf8mb4 --collation-server=utf8mb4_general_ci   set the database default character set and collation


# Grant remote login privileges
[root@xdclass ~]# docker exec -it mysql bash  
root@ce7e026432b3:/# mysql -u root -p

mysql> GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '123456' WITH GRANT OPTION;
Query OK, 0 rows affected, 1 warning (0.00 sec)

mysql>  FLUSH PRIVILEGES;

# Import the data
 order.sql  stock.sql  product.sql
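
Importing the three dumps can be done from the host along these lines; the commands assume the databases referenced in the JDBC URLs (tb_order, tb_stock, tb_product) already exist or are created by the dumps:

# Copy the dumps into the container and import them (illustrative paths)
docker cp order.sql mysql:/tmp/ && docker cp stock.sql mysql:/tmp/ && docker cp product.sql mysql:/tmp/
docker exec -i mysql sh -c 'mysql -uroot -p123456 tb_order < /tmp/order.sql'
docker exec -i mysql sh -c 'mysql -uroot -p123456 tb_stock < /tmp/stock.sql'
docker exec -i mysql sh -c 'mysql -uroot -p123456 tb_product < /tmp/product.sql'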

Deploying the Microservices in K8s

product

# Build the image
[root@prometheus product-service-biz]#  docker build -t 192.168.153.20/ms/product:v1 .
# Push the image
[root@prometheus product-service-biz]#  docker push 192.168.153.20/ms/product:v1
# Deploy to the K8s cluster
[root@k8s-m1 k8s]# kubectl apply -f product.yaml 
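
product.yaml is essentially a Deployment in the ms namespace that pulls the image through registry-pull-secret; a trimmed, illustrative sketch is below (the container port is an assumption), and order.yaml / stock.yaml follow the same pattern:

# Trimmed, illustrative sketch of product.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: product
  namespace: ms
spec:
  replicas: 1
  selector:
    matchLabels:
      app: product
  template:
    metadata:
      labels:
        app: product
    spec:
      imagePullSecrets:
      - name: registry-pull-secret       # created earlier for the Harbor registry
      containers:
      - name: product
        image: 192.168.153.20/ms/product:v1
        ports:
        - containerPort: 8010            # assumed application port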

order

# Build the image
[root@prometheus order-service-biz]# docker build -t 192.168.153.20/ms/order:v1 .
# Push the image
[root@prometheus order-service-biz]# docker push 192.168.153.20/ms/order:v1
# Deploy to the K8s cluster
[root@k8s-m1 k8s]# kubectl apply -f order.yaml 

stock

# Build the image
[root@prometheus stock-service-biz]# docker build -t 192.168.153.20/ms/stock:v1 .
# Push the image
[root@prometheus stock-service-biz]# docker push 192.168.153.20/ms/stock:v1
# Deploy to the K8s cluster
[root@k8s-m1 k8s]# kubectl apply -f stock.yaml 

gateway

# Build the image
[root@prometheus gateway-service]# docker build -t 192.168.153.20/ms/gateway:v1 .
# Push the image
[root@prometheus gateway-service]# docker push 192.168.153.20/ms/gateway:v1
# Deploy to the K8s cluster
[root@k8s-m1 k8s]# kubectl apply -f gateway.yaml 
# hosts
192.168.153.27 gateway.ctnrs.com
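
gateway.yaml additionally exposes the Service through the nginx Ingress controller so that gateway.ctnrs.com reaches it; a trimmed Ingress sketch consistent with the kubectl get ing output further below (older clusters may need the networking.k8s.io/v1beta1 API instead):

# Trimmed, illustrative Ingress sketch for the gateway
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: gateway
  namespace: ms
spec:
  rules:
  - host: gateway.ctnrs.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: gateway
            port:
              number: 9999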

portal

# Build the image
[root@prometheus portal-service]# docker build -t 192.168.153.20/ms/portal:v1 .
# Push the image
[root@prometheus portal-service]# docker push 192.168.153.20/ms/portal:v1
# Deploy to the K8s cluster
[root@k8s-m1 k8s]# kubectl apply -f portal.yaml 

# hosts
192.168.153.27 portal.ctnrs.com

Check the Deployed Services

[root@k8s-m1 k8s]# kubectl get pod,svc,ing -n ms
NAME                           READY   STATUS    RESTARTS   AGE
pod/eureka-0                   1/1     Running   2          86m
pod/eureka-1                   1/1     Running   2          84m
pod/eureka-2                   1/1     Running   1          83m
pod/gateway-6c7b6f7c85-g9srj   1/1     Running   1          70m
pod/order-65b848c67c-r7stp     1/1     Running   0          6m58s
pod/portal-78ccc5768c-wvt5f    1/1     Running   1          70m
pod/product-59c88fbf7f-snrkf   1/1     Running   0          7m4s
pod/stock-c9b89d8b-p4wvd       1/1     Running   0          6m51s

NAME              TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
service/eureka    ClusterIP   None         <none>        8888/TCP   86m
service/gateway   ClusterIP   10.0.0.101   <none>        9999/TCP   70m
service/portal    ClusterIP   10.0.0.44    <none>        8080/TCP   70m

NAME                                CLASS    HOSTS               ADDRESS   PORTS   AGE
ingress.networking.k8s.io/eureka    <none>   eureka.ctnrs.com              80      86m
ingress.networking.k8s.io/gateway   <none>   gateway.ctnrs.com             80      70m
ingress.networking.k8s.io/portal    <none>   portal.ctnrs.com              80      70m
http://gateway.ctnrs.com/product/queryAllProduct?page=1&limit=10

{"status":200,"msg":"success","result":[{"id":1,"productName":"测试商品1","price":99.99,"stock":99},{"id":2,"productName":"美女","price":999.0,"stock":87},{"id":3,"productName":"Q币","price":100.0,"stock":77},{"id":4,"productName":"貂皮大衣很厚很厚的那种","price":9999.0,"stock":66}]}

http://gateway.ctnrs.com/order/queryAllOrder
{"status":200,"msg":"success","result":[{"id":1,"orderNumber":"0j889r86wo0tng9x","orderProductName":"美女","orderPrice":999.0,"count":1,"buyDate":"2021-12-21T03:40:32.000+0000"}]}

Skywalking

Introduction

• Multiple ways to collect telemetry: data can come from language agents or from a service mesh.
• Automatic agents for multiple languages, including Java, .NET Core, and Node.js.
• Lightweight and efficient: no big-data platform or large amounts of server resources required.
• Modular: multiple options for the UI, storage, and cluster management.
• Alerting support.
• Good visualization.

Architecture

(SkyWalking architecture diagram)

Deployment

Deploy the ES Database

docker run --name elasticsearch -p 9200:9200 -e "discovery.type=single-node" -d elasticsearch:7.7.0
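
Before pointing SkyWalking at it, a quick check on the host running the container confirms Elasticsearch is answering:

# Verify Elasticsearch is up (run on the host where the container is running)
curl http://127.0.0.1:9200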

Deploy SkyWalking OAP

[root@k8s-m1 ~]# yum install java-11-openjdk -y
[root@k8s-m1 ~]# tar zxvf apache-skywalking-apm-es7-8.3.0.tar.gz
[root@k8s-m1 ~]# cd apache-skywalking-apm-bin-es7/
[root@k8s-m1 ~]# vi config/application.yml
storage:
  selector: ${SW_STORAGE:elasticsearch7}   # use elasticsearch7 here
  ...
  elasticsearch7:
    nameSpace: ${SW_NAMESPACE:""}
    clusterNodes: ${SW_STORAGE_ES_CLUSTER_NODES:192.168.0.10:9200}  # point this at the ES address

# Start the OAP and the UI:
[root@k8s-m1 bin]# ./startup.sh 
SkyWalking OAP started successfully!
SkyWalking Web Application started successfully!
# Access the UI:
http://192.168.153.25:8080


# collector.backend_service is the SkyWalking server; port 11800 is responsible for receiving the collected data
[root@k8s-m1 agent]# ss -antp|grep 11800
LISTEN     0      128         :::11800                   :::*                   users:(("java",pid=59156,fd=269))


Dockerfile

# Start the Java program with the agent integrated as a probe (eureka as the example); every service needs this change and must be rebuilt:

java -jar -javaagent:/skywalking/skywalking-agent.jar=agent.service_name=ms-eureka,agent.instance_name=$(echo $HOSTNAME | awk -F- '{print $1"-"$NF}'),collector.backend_service=192.168.153.25:11800 -Deureka.instance.hostname=${MY_POD_NAME}.eureka.ms /eureka-service.jar
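
In practice this means the service Dockerfile copies the unpacked agent directory into the image and starts the jar through it; a hypothetical sketch extending the earlier eureka-service Dockerfile (base image and paths are still assumptions):

# Hypothetical Dockerfile with the SkyWalking agent baked in (illustrative)
FROM java:8-jdk-alpine
COPY ./skywalking-agent /skywalking
COPY ./target/eureka-service.jar /eureka-service.jar
EXPOSE 8888
# shell form so that $HOSTNAME and ${MY_POD_NAME} are expanded at container start
CMD java -javaagent:/skywalking/skywalking-agent.jar=agent.service_name=ms-eureka,agent.instance_name=$(echo $HOSTNAME | awk -F- '{print $1"-"$NF}'),collector.backend_service=192.168.153.25:11800 -Deureka.instance.hostname=${MY_POD_NAME}.eureka.ms -jar /eureka-service.jar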


Build and Release

# Start the MySQL service
docker start mysql

# Start ES
docker start elasticsearch

# Start the SkyWalking OAP and UI:
[root@k8s-m1 bin]# ./startup.sh 
SkyWalking OAP started successfully!
SkyWalking Web Application started successfully!
# Access the UI:
http://192.168.153.25:8080


# collector.backend_service is the SkyWalking server; port 11800 is responsible for receiving the collected data
[root@k8s-m1 agent]# ss -antp|grep 11800
LISTEN     0      128         :::11800                   :::*                   users:(("java",pid=59156,fd=269))

# Modify the Dockerfile, then rebuild and push
docker build -t 192.168.153.20/ms/eureka:v2 .
docker push 192.168.153.20/ms/eureka:v2
......

# An OOMKilled problem appeared while rolling the services out to K8s
Fix: raise the memory limit in the manifests
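
Before raising the limit it helps to confirm the container really was OOM-killed; the pod name below is just a placeholder:

# Check the last termination reason of a suspect pod (pod name is a placeholder)
kubectl -n ms describe pod gateway-6c7b6f7c85-g9srj | grep -A5 "Last State"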

[root@k8s-m1 k8s]# kubectl get pod,svc,ing -n ms
NAME                           READY   STATUS    RESTARTS   AGE
pod/eureka-0                   1/1     Running   0          62m
pod/eureka-1                   1/1     Running   0          61m
pod/eureka-2                   1/1     Running   0          60m
pod/gateway-77776889-r29dt     1/1     Running   0          34m
pod/order-846f7c95b9-dpqh8     1/1     Running   0          30m
pod/portal-66cf475fc4-9ww57    1/1     Running   1          49m
pod/product-554d7d554c-6g87b   1/1     Running   0          30m
pod/stock-546b455df8-nblxn     1/1     Running   0          30m

NAME              TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)    AGE
service/eureka    ClusterIP   None         <none>        8888/TCP   62m
service/gateway   ClusterIP   10.0.0.173   <none>        9999/TCP   34m
service/portal    ClusterIP   10.0.0.94    <none>        8080/TCP   49m

NAME                                CLASS    HOSTS               ADDRESS   PORTS   AGE
ingress.networking.k8s.io/eureka    <none>   eureka.ctnrs.com              80      62m
ingress.networking.k8s.io/gateway   <none>   gateway.ctnrs.com             80      34m
ingress.networking.k8s.io/portal    <none>   portal.ctnrs.com              80      49m

Results

(screenshots of the SkyWalking UI)

Production Pitfalls and Lessons Learned

Container resources are limited, yet containers still keep getting killed?

Before Java 9, the JVM could not automatically detect the memory limit that Docker (cgroups) sets, so as the application's load fluctuates its memory usage can grow too large, exceed the limits, and trigger K8s to kill the container.

Solution:
• Manually specify the JVM heap size

# In the Dockerfile, pass $JAVA_OPTS through to the JVM:
CMD java -jar $JAVA_OPTS /gateway-service.jar

# In the Deployment manifest, set JAVA_OPTS and the resource limits together:
env:
  - name: JAVA_OPTS
    value: "-Xmx1g"
resources:
  requests:
    cpu: 0.5
    memory: 256Mi
  limits:
    cpu: 1
    memory: 1Gi
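
On newer JDKs (8u191+ / 10+) an alternative to a hard -Xmx is letting the JVM size the heap from the cgroup limit itself; a sketch of the same env block with that approach:

# Alternative: container-aware heap sizing instead of a fixed -Xmx (requires JDK 8u191+ / 10+)
env:
  - name: JAVA_OPTS
    value: "-XX:+UseContainerSupport -XX:MaxRAMPercentage=75.0"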

Traffic Loss During Rolling Updates

When a rolling update is triggered and a Pod is being deleted, some nodes' kube-proxy may not yet have synced the iptables rules, so part of the traffic is still routed to the Terminating Pod and those requests fail.
Solution: configure a preStop hook that gracefully pauses for 5 seconds before the container terminates, giving kube-proxy a little extra time:

lifecycle:
  preStop:
    exec:
      command:
      - sh
      - -c
      - "sleep 5"
      
Other callbacks can also be run in the same hook, e.g. a curl call ......
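
For example, if the service should deregister itself before it stops, the hook can combine a curl call with the pause; the endpoint below assumes Spring Boot Actuator's shutdown endpoint is enabled, which this project may not do, and the port is illustrative:

# Hypothetical preStop hook combining a shutdown callback with the pause
lifecycle:
  preStop:
    exec:
      command: ["sh", "-c", "curl -s -XPOST http://127.0.0.1:9999/actuator/shutdown || true; sleep 5"]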

The Importance of Health Checks in Rolling Updates

Rolling update is the default release strategy. With health checks configured, the rollout decides, based on probe status, whether to continue and whether a Pod may receive traffic, so there are always available Pods throughout the update and the upgrade stays smooth:
readinessProbe:
  tcpSocket:
    port: 9999
  initialDelaySeconds: 60
  periodSeconds: 10
livenessProbe:
  tcpSocket:
    port: 9999
  initialDelaySeconds: 60
  periodSeconds: 10