
K8s HA Cluster Deployment

2021-01-30  OPS_Joy

1. Base environment: 2 CPU cores + 4 GB RAM per node (no fewer than 2 cores)
k8s-m1 192.168.66.110
k8s-m2 192.168.66.111
k8s-m3 192.168.66.112
Virtual IP (VIP): 192.168.66.166
2. Initial configuration, performed on each of the three nodes

vim /etc/hosts
192.168.66.110 k8s-m1
192.168.66.111 k8s-m2
192.168.66.112 k8s-m3
hostnamectl set-hostname k8s-m1    # run on 192.168.66.110
hostnamectl set-hostname k8s-m2    # run on 192.168.66.111
hostnamectl set-hostname k8s-m3    # run on 192.168.66.112
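
If you prefer to push these settings to all three machines from one host, a minimal sketch (assuming root SSH access from that host to the three IPs above):

HOSTS="192.168.66.110 k8s-m1
192.168.66.111 k8s-m2
192.168.66.112 k8s-m3"
# append the host entries on every node
for node in 192.168.66.110 192.168.66.111 192.168.66.112; do
    ssh root@"$node" "echo '$HOSTS' >> /etc/hosts"
done
# set each node's hostname
ssh root@192.168.66.110 hostnamectl set-hostname k8s-m1
ssh root@192.168.66.111 hostnamectl set-hostname k8s-m2
ssh root@192.168.66.112 hostnamectl set-hostname k8s-m3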

Disable the firewall and SELinux

systemctl stop firewalld && systemctl disable firewalld
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config

Disable swap

swapoff -a && sysctl -w vm.swappiness=0
sed -i 's/.*swap.*/#&/g' /etc/fstab

Set the kernel parameters required by Docker and Kubernetes

cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf

Load the ip_vs kernel modules

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
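
Note: the module list above matches the CentOS 7 3.10 kernel used here; on kernels 4.19 and later nf_conntrack_ipv4 was merged into nf_conntrack, so a guarded variant of the last module load would be:

# older kernels ship nf_conntrack_ipv4, 4.19+ only nf_conntrack
modprobe nf_conntrack_ipv4 2>/dev/null || modprobe nf_conntrack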

Install Docker 19.03

yum -y install yum-utils device-mapper-persistent-data lvm2 wget epel-release ipvsadm vim ntpdate
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce-19.03.9
systemctl enable docker && systemctl start docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://gco4rcsp.mirror.aliyuncs.com"],
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  }
}
EOF
systemctl daemon-reload && systemctl restart docker
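
To confirm Docker picked up the systemd cgroup driver (it must match the kubelet's):

docker info 2>/dev/null | grep -i 'cgroup driver'
# expected: Cgroup Driver: systemd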

Install the kube components (kubelet, kubeadm, kubectl)

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
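
The steps above do not enable the kubelet service. kubeadm expects it to be enabled on every node; it will crash-loop until kubeadm generates its configuration, which is expected:

systemctl enable --now kubelet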

Download cfssl (on m1 only)

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
mkdir -p /etc/etcd/ssl && cd /etc/etcd/ssl

etcd CA signing configuration

cat << EOF | tee ca-config.json
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "etcd": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

etcd CA certificate signing request

cat << EOF | tee ca-csr.json
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

Generate the CA certificate and private key

cfssl gencert -initca ca-csr.json | cfssljson -bare ca

etcd server certificate signing request

cat << EOF | tee server-csr.json
{
    "CN": "etcd",
    "hosts": [
    "192.168.66.110",
    "192.168.66.111",
    "192.168.66.112"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing"
        }
    ]
}
EOF

Generate the server certificate

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=etcd server-csr.json | cfssljson -bare server

List the directory contents, then copy the four .pem files to the other two master nodes, keeping the same directory path

[root@k8s-m1 ssl]# ll
total 36
-rw-r--r--. 1 root root  288 Jan 30 13:45 ca-config.json
-rw-r--r--. 1 root root  956 Jan 30 13:45 ca.csr
-rw-r--r--. 1 root root  209 Jan 30 13:45 ca-csr.json
-rw-------. 1 root root 1679 Jan 30 13:45 ca-key.pem
-rw-r--r--. 1 root root 1265 Jan 30 13:45 ca.pem
-rw-r--r--. 1 root root 1013 Jan 30 13:47 server.csr
-rw-r--r--. 1 root root  293 Jan 30 13:47 server-csr.json
-rw-------. 1 root root 1679 Jan 30 13:47 server-key.pem
-rw-r--r--. 1 root root 1338 Jan 30 13:47 server.pem
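
A minimal way to copy them over (assuming root SSH access and the same path on the other masters):

for node in 192.168.66.111 192.168.66.112; do
    ssh root@"$node" "mkdir -p /etc/etcd/ssl"
    scp /etc/etcd/ssl/{ca,ca-key,server,server-key}.pem root@"$node":/etc/etcd/ssl/
done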

Install etcd on all three master nodes; in the configuration file, change the node-specific values accordingly

wget https://github.com/etcd-io/etcd/releases/download/v3.3.12/etcd-v3.3.12-linux-amd64.tar.gz
tar -zxf etcd-v3.3.12-linux-amd64.tar.gz
cd etcd-v3.3.12-linux-amd64 && cp etcd* /usr/local/bin/

etcd main configuration file (shown here for m1 / etcd01)

cat << EOF | tee /etc/etcd/etcd.conf
ETCD_NAME="etcd01"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.66.110:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.66.110:2379,http://127.0.0.1:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.66.110:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.66.110:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.66.110:2380,etcd02=https://192.168.66.111:2380,etcd03=https://192.168.66.112:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
#[Security]
ETCD_CERT_FILE="/etc/etcd/ssl/server.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/server-key.pem"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/server.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/server-key.pem"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
EOF
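
On m2 and m3 only the node name and the local listen/advertise addresses change; for example, the lines that differ on m2 (etcd02) are:

ETCD_NAME="etcd02"
ETCD_LISTEN_PEER_URLS="https://192.168.66.111:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.66.111:2379,http://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.66.111:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.66.111:2379"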

systemd unit file

cat << 'EOF' | tee /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/etc/etcd/etcd.conf
ExecStart=/usr/local/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS} \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=${ETCD_INITIAL_CLUSTER_STATE}  \
--cert-file=${ETCD_CERT_FILE} \
--key-file=${ETCD_KEY_FILE} \
--peer-cert-file=${ETCD_PEER_CERT_FILE} \
--peer-key-file=${ETCD_PEER_KEY_FILE} \
--trusted-ca-file=${ETCD_TRUSTED_CA_FILE} \
--client-cert-auth=${ETCD_CLIENT_CERT_AUTH} \
--peer-client-cert-auth=${ETCD_PEER_CLIENT_CERT_AUTH} \
--peer-trusted-ca-file=${ETCD_PEER_TRUSTED_CA_FILE}
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

Start the service on all three nodes (start them close together; the first node will wait until the others join to form the cluster)

systemctl daemon-reload && systemctl enable etcd && systemctl start etcd

Check the listening ports on the three master nodes

[root@k8s-m2 etcd]# netstat -ntlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1290/master         
tcp        0      0 192.168.66.111:2379     0.0.0.0:*               LISTEN      1853/etcd           
tcp        0      0 127.0.0.1:2379          0.0.0.0:*               LISTEN      1853/etcd           
tcp        0      0 192.168.66.111:2380     0.0.0.0:*               LISTEN      1853/etcd           
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      1036/sshd           
tcp6       0      0 ::1:25                  :::*                    LISTEN      1290/master         
tcp6       0      0 :::22                   :::*                    LISTEN      1036/sshd

Check whether the etcd cluster is healthy

[root@k8s-m2 etcd]# etcdctl --ca-file=/etc/etcd/ssl/ca.pem --cert-file=/etc/etcd/ssl/server.pem --key-file=/etc/etcd/ssl/server-key.pem --endpoints="https://192.168.66.110:2379,https://192.168.66.111:2379,https://192.168.66.112:2379" cluster-health
member 1c7f4fcaf93a4f89 is healthy: got healthy result from https://192.168.66.111:2379
member 2236499add1299a8 is healthy: got healthy result from https://192.168.66.110:2379
member b2ca950c7544a007 is healthy: got healthy result from https://192.168.66.112:2379
cluster is healthy
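
The command above uses etcdctl's v2 API (the default in etcd 3.3). An equivalent health check through the v3 API looks like this:

ETCDCTL_API=3 etcdctl --cacert=/etc/etcd/ssl/ca.pem --cert=/etc/etcd/ssl/server.pem \
    --key=/etc/etcd/ssl/server-key.pem \
    --endpoints="https://192.168.66.110:2379,https://192.168.66.111:2379,https://192.168.66.112:2379" \
    endpoint health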

Install HAProxy and keepalived on all three master nodes. Adjust the network interface name and virtual IP as needed; on the other two nodes change state to BACKUP,
and set priority to 90 and 80 on the second and third nodes respectively.

yum -y install haproxy keepalived
cat > /etc/keepalived/keepalived.conf << EOF 
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 3
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
    192.168.66.166
    }
     track_script {
        check_haproxy
     }
}
EOF

HAProxy health-check script

cat > /etc/keepalived/check_haproxy.sh <<EOF
#!/bin/bash
# if haproxy is no longer running, stop keepalived so the VIP fails over
systemctl status haproxy > /dev/null
if [[ \$? != 0 ]];then
        echo "haproxy is down, stopping keepalived"
        systemctl stop keepalived
fi
EOF
chmod +x /etc/keepalived/check_haproxy.sh

HAProxy configuration file

cat > /etc/haproxy/haproxy.cfg << EOF 
global
    log         127.0.0.1 local2
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
frontend  k8s-api 
   bind *:8443
   mode tcp
   default_backend             apiserver
#---------------------------------------------------------------------
backend apiserver
    balance     roundrobin
    mode tcp
    server  k8s-m1 192.168.66.110:6443 check weight 1 maxconn 2000 check inter 2000 rise 2 fall 3
    server  k8s-m2 192.168.66.111:6443 check weight 1 maxconn 2000 check inter 2000 rise 2 fall 3
    server  k8s-m3 192.168.66.112:6443 check weight 1 maxconn 2000 check inter 2000 rise 2 fall 3
EOF
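
Before starting HAProxy you can validate the configuration syntax:

haproxy -c -f /etc/haproxy/haproxy.cfg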

Start keepalived and HAProxy

systemctl enable --now keepalived haproxy

Check the IP addresses on each node to see which one currently holds the VIP (the MASTER)

[root@k8s-m1 ssl]# ip addr
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:e3:b4:a9 brd ff:ff:ff:ff:ff:ff
    inet 192.168.66.110/24 brd 192.168.66.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.66.166/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::78b1:b1f2:9042:937d/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

[root@k8s-m2 member]# ip addr
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:05:0f:0e brd ff:ff:ff:ff:ff:ff
    inet 192.168.66.111/24 brd 192.168.66.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::b31c:59f5:e055:92ec/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

[root@k8s-m3 ~]# ip addr
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:19:10:b6 brd ff:ff:ff:ff:ff:ff
    inet 192.168.66.112/24 brd 192.168.66.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::4966:3801:d39c:875b/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

On m1: prepare the kubeadm init configuration; the image repository is switched to the Aliyun mirror

[root@k8s-m1 ssl]# vim /etc/kubernetes/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.20.0
controlPlaneEndpoint: "192.168.66.166:8443"
imageRepository: registry.aliyuncs.com/google_containers
etcd:
  external:
    endpoints:
    - https://192.168.66.110:2379
    - https://192.168.66.111:2379
    - https://192.168.66.112:2379
    caFile: /etc/etcd/ssl/ca.pem
    certFile: /etc/etcd/ssl/server.pem
    keyFile: /etc/etcd/ssl/server-key.pem
networking:
  podSubnet: 10.244.0.0/16

---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
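
Optionally, pre-pull the control-plane images before running init so the initialization itself is quicker; assuming the config was saved to the path shown above:

kubeadm config images pull --config=/etc/kubernetes/kubeadm-config.yaml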

Start the installation. When it finishes it prints output like the following; copy the control-plane join command and run it on m2 and m3.

[root@k8s-m1 ssl]# kubeadm init --config=kubeadm-config.yaml --upload-certs
.......
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
# control-plane join command
  kubeadm join 192.168.66.166:8443 --token 3fx7og.b12jhx6d6l8rssv4 \
    --discovery-token-ca-cert-hash sha256:a050762cd04061030ffc6f0d9cb32171679195e4da8c094f6f0cb09bab88cca0 \
    --control-plane --certificate-key b0f0271ac70103fc2602f83010435df7c97d4ef0c7bcdaeb066f048a2411868d

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:
# worker node join command
kubeadm join 192.168.66.166:8443 --token 3fx7og.b12jhx6d6l8rssv4 \
    --discovery-token-ca-cert-hash sha256:a050762cd04061030ffc6f0d9cb32171679195e4da8c094f6f0cb09bab88cca0 

Create the kubectl configuration on m1

cd /root && mkdir -p .kube && cp /etc/kubernetes/admin.conf .kube/config
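
Since m2 and m3 have already joined as control-plane nodes, each of them also has /etc/kubernetes/admin.conf; repeat the same kubeconfig setup there if you want to run kubectl from those nodes (as is done later from m2):

mkdir -p $HOME/.kube && cp /etc/kubernetes/admin.conf $HOME/.kube/config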

Deploy kube-flannel. The manifest is provided separately in another article; copy it over and apply it directly.

kubectl apply -f kube-flannel.yaml

Check that the cluster nodes and core components are healthy

[root@k8s-m1 opt]# kubectl get nodes
NAME     STATUS   ROLES                  AGE     VERSION
k8s-m1   Ready    control-plane,master   4h16m   v1.20.2
k8s-m2   Ready    control-plane,master   4h13m   v1.20.2
k8s-m3   Ready    control-plane,master   4h13m   v1.20.2

[root@k8s-m1 opt]# kubectl get pods -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-7f89b7bc75-b5sdz         1/1     Running   0          29m
coredns-7f89b7bc75-rhsbj         1/1     Running   0          29m
kube-apiserver-k8s-m1            1/1     Running   0          29m
kube-apiserver-k8s-m2            1/1     Running   0          27m
kube-apiserver-k8s-m3            1/1     Running   0          26m
kube-controller-manager-k8s-m1   1/1     Running   0          29m
kube-controller-manager-k8s-m2   1/1     Running   0          27m
kube-controller-manager-k8s-m3   1/1     Running   0          26m
kube-flannel-ds-amd64-hq8qt      1/1     Running   0          14m
kube-flannel-ds-amd64-kggwn      1/1     Running   0          14m
kube-flannel-ds-amd64-r42gv      1/1     Running   0          14m
kube-proxy-ldplv                 1/1     Running   0          26m
kube-proxy-nz7gx                 1/1     Running   0          29m
kube-proxy-ttrbj                 1/1     Running   0          27m
kube-scheduler-k8s-m1            1/1     Running   0          29m
kube-scheduler-k8s-m2            1/1     Running   0          27m
kube-scheduler-k8s-m3            1/1     Running   0          26m

Check which proxy mode the cluster is using

[root@k8s-m1 opt]# curl 127.0.0.1:10249/proxyMode
ipvs
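
Since ipvsadm was installed in the base setup, the IPVS virtual servers that kube-proxy programs can also be inspected directly:

ipvsadm -Ln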

By default, master nodes are tainted so Pods are not scheduled onto them; with no worker nodes yet, Pods would stay Pending. Remove the taint here to allow scheduling.

[root@k8s-m1 opt]# kubectl taint nodes --all node-role.kubernetes.io/master-
node/k8s-m1 untainted
node/k8s-m2 untainted
node/k8s-m3 untainted

Shut down m1 to simulate a failure; the virtual IP has moved to m2

[root@k8s-m2 member]# ip addr
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:05:0f:0e brd ff:ff:ff:ff:ff:ff
    inet 192.168.66.111/24 brd 192.168.66.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet 192.168.66.166/32 scope global ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::b31c:59f5:e055:92ec/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever

Check the cluster status

[root@k8s-m2 .kube]# kubectl get nodes
NAME     STATUS     ROLES                  AGE     VERSION
k8s-m1   NotReady   control-plane,master   4h47m   v1.20.2
k8s-m2   Ready      control-plane,master   4h44m   v1.20.2
k8s-m3   Ready      control-plane,master   4h44m   v1.20.2

Add a worker node
1. Copy kubernetes.repo from a master node to the new node
yum -y install kubelet kubeadm
Install Docker (same steps as on the masters), then run the worker join command:

[root@k8s-c1 kubernetes]# kubeadm join 192.168.66.166:8443 --token 3fx7og.b12jhx6d6l8rssv4 \
>     --discovery-token-ca-cert-hash sha256:a050762cd04061030ffc6f0d9cb32171679195e4da8c094f6f0cb09bab88cca0

Check the cluster status from m2

[root@k8s-m2 opt]# kubectl get nodes -o wide
NAME     STATUS     ROLES                  AGE     VERSION   INTERNAL-IP      EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION           CONTAINER-RUNTIME
k8s-c1   Ready      <none>                 12m     v1.20.2   192.168.66.113   <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   docker://19.3.9
k8s-m1   NotReady   control-plane,master   5h19m   v1.20.2   192.168.66.110   <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   docker://19.3.9
k8s-m2   Ready      control-plane,master   5h17m   v1.20.2   192.168.66.111   <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   docker://19.3.9
k8s-m3   Ready      control-plane,master   5h16m   v1.20.2   192.168.66.112   <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   docker://19.3.9

The new node's ROLES column shows <none>; you can add a role label manually

[root@k8s-m2 opt]# kubectl label node k8s-c1 node-role.kubernetes.io/worker=worker
node/k8s-c1 labeled
[root@k8s-m2 opt]# kubectl get nodes -o wide
NAME     STATUS     ROLES                  AGE     VERSION   INTERNAL-IP      EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION           CONTAINER-RUNTIME
k8s-c1   Ready      worker                 14m     v1.20.2   192.168.66.113   <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   docker://19.3.9
k8s-m1   NotReady   control-plane,master   5h21m   v1.20.2   192.168.66.110   <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   docker://19.3.9
k8s-m2   Ready      control-plane,master   5h19m   v1.20.2   192.168.66.111   <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   docker://19.3.9
k8s-m3   Ready      control-plane,master   5h18m   v1.20.2   192.168.66.112   <none>        CentOS Linux 7 (Core)   3.10.0-1062.el7.x86_64   docker://19.3.9

Create a Deployment
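
The manifest is not shown here; a one-line equivalent that would produce the nginx-1 Deployment seen below (the nginx image is an assumption based on the name) is:

kubectl create deployment nginx-1 --image=nginx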

[root@k8s-m2 opt]# kubectl get pods -o wide
NAME                       READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
nginx-1-645d5c6669-x9tvw   1/1     Running   0          19s   10.244.4.4   k8s-c1   <none>           <none>

Shut down c1 to simulate a worker node failure. After roughly five minutes (the default pod eviction timeout) the Pod is automatically rescheduled onto another node.

[root@k8s-m2 opt]# kubectl get pods -o wide
NAME                       READY   STATUS        RESTARTS   AGE     IP           NODE     NOMINATED NODE   READINESS GATES
nginx-1-645d5c6669-ltkpt   1/1     Running       0          58s     10.244.2.3   k8s-m3   <none>           <none>
nginx-1-645d5c6669-x9tvw   1/1     Terminating   0          7m27s   10.244.4.4   k8s-c1   <none>           <none>

Other useful operations

# force-delete a Pod that has been stuck in Terminating for a long time
kubectl delete pod PODNAME --force --grace-period=0
# remove a node from the cluster
kubectl delete node k8s-c1
# reset a node
kubeadm reset
# re-join a node
kubeadm join --token <token> <master-ip>:<master-port> --discovery-token-ca-cert-hash sha256:<hash>
# list tokens; an empty list means the token has expired
kubeadm token list
# create a token that never expires (the default TTL is 24 hours)
kubeadm token create --ttl 0
# compute the CA certificate hash
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
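
If the original token has already expired, the quickest way to get a fresh worker join command (new token plus the CA hash) is:

kubeadm token create --print-join-command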