K8S入门系列之集群二进制部署--> master篇(一)

2019-12-03  本文已影响0人  迷失的风儿

组件版本和配置策略

组件版本

核心插件:

主要配置策略

1. K8S系统初始化

1.1 系统环境

[root@localhost ~]# cat /etc/redhat-release 
CentOS Linux release 8.0.1905 (Core) 

1.2 修改各个节点的对应hostname, 并分别写入/etc/hosts

# 对应主机修改
hostnamectl set-hostname k8s-master01

# 写入hosts--> 注意是 >> 表示不改变原有内容追加!
cat>> /etc/hosts <<EOF
192.168.2.201 k8s-master01
192.168.2.202 k8s-master02
192.168.2.203 k8s-master03
192.168.2.11 k8s-node01
192.168.2.12 k8s-node02
EOF

1.3 所有节点安装常用工具和依赖包

yum install wget yum-utils net-tools tar curl jq ipvsadm ipset conntrack iptables sysstat libseccomp -y

1.4 所有节点关闭 firewalld, selinux 以及 swap

# Disable the firewall service and flush all iptables rules
systemctl disable firewalld && systemctl stop firewalld && systemctl status firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
# Default-accept forwarded traffic (needed for pod networking).
# NOTE: original had "ACCEP" (typo) — iptables rejects an unknown target.
iptables -P FORWARD ACCEPT

# 关闭selinux  --->selinux=disabled 需重启生效!
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

# 关闭swap --->注释掉swap那一行, 需重启生效!
swapoff -a && sed -i '/ swap / s/^\(.*\)$/# \1/g' /etc/fstab

1.5 所有节点设置时间同步

timedatectl set-timezone Asia/Shanghai
timedatectl set-local-rtc 0

yum install chrony -y
systemctl enable chronyd && systemctl start chronyd && systemctl status chronyd

1.6 所有节点调整内核参数, k8s必备参数!

# 先加载模块
modprobe br_netfilter

# 直接写入对应位置
cat> /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max = 6553500
net.nf_conntrack_max = 6553500
net.ipv4.tcp_max_tw_buckets = 4096
EOF

# 生效配置
sysctl -p /etc/sysctl.d/kubernetes.conf

1.7 所有节点创建k8s工作目录并设置环境变量!

# 在每台机器上创建目录:
mkdir -p /opt/k8s/{bin,cert,script}
mkdir -p /opt/etcd/{bin,cert}
mkdir -p /opt/lib/etcd
mkdir -p /root/.kube
mkdir -p /opt/log/kubernetes

# Add the k8s bin directories to PATH on every machine.
# Single quotes defer expansion of $PATH/$HOME/$JAVA_HOME to login time;
# the original double-quoted `sh -c` form expanded them immediately and
# baked the current values into the file. `export` makes the variable
# visible to child processes.
echo 'export PATH=/opt/k8s/bin:/opt/etcd/bin:/opt/flanneld/bin:$PATH:$HOME/bin:$JAVA_HOME/bin' >> /etc/profile.d/k8s.sh
# Apply to the current shell
source /etc/profile.d/k8s.sh

1.8 无密码 ssh 登录其它节点

# 生成秘钥对 ( 在操作节点上生成)
ssh-keygen

# 将自己的公钥发给其他服务器
ssh-copy-id root@k8s-master01
ssh-copy-id root@k8s-master02
ssh-copy-id root@k8s-master03
# 重启机器, 最好做一下! 并验证检查各项初始化设置
reboot

2. 创建CA根证书和密钥

2.1 安装cfssl工具集

[root@k8s-master01 ~]# wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-master01 ~]# mv cfssl_linux-amd64 /opt/k8s/bin/cfssl

[root@k8s-master01 ~]# wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@k8s-master01 ~]# mv cfssljson_linux-amd64 /opt/k8s/bin/cfssljson

[root@k8s-master01 ~]# wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@k8s-master01 ~]# mv cfssl-certinfo_linux-amd64 /opt/k8s/bin/cfssl-certinfo

[root@k8s-master01 ~]# chmod +x /opt/k8s/bin/*

2.2 创建根证书CA

2.3 创建配置文件

[root@k8s-master01 ~]# cat> /opt/k8s/cert/ca-config.json <<EOF
{
    "signing": {
        "default": {
            "expiry": "876000h"
        },
        "profiles": {
            "kubernetes": {
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ],
                "expiry": "876000h"
            }
        }
    }
}
EOF

2.4 创建 CA 证书签名请求模板

[root@k8s-master01 ~]# cat > /opt/k8s/cert/ca-csr.json <<EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "steams"
        }
    ]
}
EOF

2.5 生成CA证书、私钥和csr证书签名请求

[root@k8s-master01 ~]# cfssl gencert -initca /opt/k8s/cert/ca-csr.json | cfssljson -bare /opt/k8s/cert/ca

# 查看是否生成!
[root@k8s-master01 ~]# ls /opt/k8s/cert/
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem

2.6 分发证书文件

# 创建分发脚本
[root@k8s-master01 cert]# vi /opt/k8s/script/scp_k8s_cacert.sh 
#!/usr/bin/env bash
# Distribute the CA certificate, key and signing config to every master node.
# Usage: scp_k8s_cacert.sh <master_ip> [<master_ip> ...]
MASTER_IPS=("$@")   # accept any number of nodes (was hard-coded to exactly 3)
for master_ip in "${MASTER_IPS[@]}"; do   # quoted expansion: safe under IFS changes / empty args
    echo ">>> ${master_ip}"
    scp /opt/k8s/cert/ca*.pem /opt/k8s/cert/ca-config.json "root@${master_ip}:/opt/k8s/cert"
done

# 执行脚本, 注意传参!
[root@k8s-master01 cert]# bash /opt/k8s/script/scp_k8s_cacert.sh 192.168.2.201 192.168.2.202 192.168.2.203

3. 部署etcd集群

3.1 下载二进制文件

[root@k8s-master01 ~]# wget https://github.com/etcd-io/etcd/releases/download/v3.3.18/etcd-v3.3.18-linux-amd64.tar.gz
[root@k8s-master01 ~]# tar -xvf etcd-v3.3.18-linux-amd64.tar.gz 

3.2 创建etcd证书请求模板文件

[root@k8s-master01 cert]# cat > /opt/etcd/cert/etcd-csr.json <<EOF
{
    "CN": "etcd",
    "hosts": [
        "127.0.0.1",
        "192.168.2.201",
        "192.168.2.202",
        "192.168.2.203"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "k8s",
            "OU": "steams"
        }
    ]
}
EOF

3.3 生成证书和私钥

[root@k8s-master01 ~]# cfssl gencert \
-ca=/opt/k8s/cert/ca.pem \
-ca-key=/opt/k8s/cert/ca-key.pem \
-config=/opt/k8s/cert/ca-config.json \
-profile=kubernetes /opt/etcd/cert/etcd-csr.json | cfssljson -bare /opt/etcd/cert/etcd

# 查看是否生成!
[root@k8s-master01 ~]# ls /opt/etcd/cert/*
etcd.csr       etcd-csr.json  etcd-key.pem   etcd.pem  

3.4 分发生成的证书, 私钥和etcd安装文件到各etcd节点

# 创建分发 etcd 以及证书私钥
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_etcd.sh
#!/usr/bin/env bash
# Copy the etcd binaries plus the etcd cert/key pair to every etcd node.
# Usage: scp_etcd.sh <master_ip> [<master_ip> ...]
MASTER_IPS=("$@")   # accept any number of nodes (was hard-coded to exactly 3)
for master_ip in "${MASTER_IPS[@]}"; do   # quoted expansion: safe under IFS changes / empty args
    echo ">>> ${master_ip}"
    scp /root/etcd-v3.3.18-linux-amd64/etcd* "root@${master_ip}:/opt/etcd/bin"
    ssh "root@${master_ip}" "chmod +x /opt/etcd/bin/*"
    scp /opt/etcd/cert/etcd*.pem "root@${master_ip}:/opt/etcd/cert/"
done

# 执行脚本, 注意传参!
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_etcd.sh 192.168.2.201 192.168.2.202 192.168.2.203

3.5 为所有 etcd 节点(共用master节点)创建 etcd 的 systemd unit 文件

[root@k8s-master01 ~]# vi /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
Documentation=https://github.com/coreos
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
User=root
Type=notify
WorkingDirectory=/opt/lib/etcd/
ExecStart=/opt/etcd/bin/etcd \
--client-cert-auth=true \
--trusted-ca-file=/opt/k8s/cert/ca.pem \
--cert-file=/opt/etcd/cert/etcd.pem \
--key-file=/opt/etcd/cert/etcd-key.pem \
--peer-client-cert-auth=true \
--peer-trusted-ca-file=/opt/k8s/cert/ca.pem \
--peer-cert-file=/opt/etcd/cert/etcd.pem \
--peer-key-file=/opt/etcd/cert/etcd-key.pem \
--name=##ETCD_NAME## \
--data-dir=/opt/lib/etcd \
--listen-client-urls=https://##MASTER_IP##:2379 \
--listen-peer-urls=https://##MASTER_IP##:2380 \
--initial-cluster-state=new \
--initial-cluster-token=k8s-etcd-cluster  \
--advertise-client-urls=https://##MASTER_IP##:2379 \
--initial-advertise-peer-urls=https://##MASTER_IP##:2380 \
--initial-cluster=etcd0=https://192.168.2.201:2380,etcd1=https://192.168.2.202:2380,etcd2=https://192.168.2.203:2380 
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

3.6 在所有节点为 etcd 服务设置开机启动, 并启动检查服务

systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd && systemctl status etcd

3.7 验证集群

# 查看集群健康状态
etcdctl --endpoints=https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379 \
--cert-file=/opt/etcd/cert/etcd.pem \
--ca-file=/opt/k8s/cert/ca.pem \
--key-file=/opt/etcd/cert/etcd-key.pem \
cluster-health

# 查看集群成员列表
etcdctl --endpoints=https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379 \
--cert-file=/opt/etcd/cert/etcd.pem \
--ca-file=/opt/k8s/cert/ca.pem \
--key-file=/opt/etcd/cert/etcd-key.pem \
member list

4. 部署kubectl命令行工具

4.1 下载kubectl二进制文件

# 下载二进制文件(直连无法下载!)
[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.17.0/kubernetes-server-linux-amd64.tar.gz
# 解压, 复制到相应目录以及增加执行权限
[root@k8s-master01 ~]# tar -zxvf kubernetes-server-linux-amd64.tar.gz
[root@k8s-master01 ~]# cp ./kubernetes/server/bin/kubectl /opt/k8s/bin/ && chmod +x /opt/k8s/bin/*

4.2 创建 admin 证书和私钥

创建证书签名请求

[root@k8s-master01 ~]# cat > /opt/k8s/cert/admin-csr.json <<EOF
{
    "CN": "admin",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "system:masters",
            "OU": "steams"
        }
    ]
}
EOF

生成证书和私钥

[root@k8s-master01 ~]# cfssl gencert \
-ca=/opt/k8s/cert/ca.pem \
-ca-key=/opt/k8s/cert/ca-key.pem \
-config=/opt/k8s/cert/ca-config.json \
-profile=kubernetes /opt/k8s/cert/admin-csr.json | cfssljson -bare /opt/k8s/cert/admin

# 查看生成的证书和私钥
[root@k8s-master01 ~]# ls /opt/k8s/cert/admin*
admin.csr       admin-csr.json  admin-key.pem   admin.pem  

4.3 创建 kubectl 的 kubeconfig 配置文件

# step.1 设置集群参数
# --server=${KUBE_APISERVER}, 指定IP和端口; 本文使用的是haproxy的VIP和端口;
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/cert/ca.pem \
--embed-certs=true \
--server=https://192.168.2.210:8443 

# step.2 设置客户端认证参数
[root@k8s-master01 ~]# kubectl config set-credentials kube-admin \
--client-certificate=/opt/k8s/cert/admin.pem \
--client-key=/opt/k8s/cert/admin-key.pem \
--embed-certs=true 

# step.3 设置上下文参数
[root@k8s-master01 ~]#  kubectl config set-context kube-admin@kubernetes \
--cluster=kubernetes \
--user=kube-admin 

# step.4设置默认上下文
[root@k8s-master01 ~]# kubectl config use-context kube-admin@kubernetes

--certificate-authority :验证 kube-apiserver 证书的根证书;
--client-certificate 、 --client-key :刚生成的 admin 证书和私钥,连接 kube-apiserver 时使用;
--embed-certs=true :将 ca.pem 和 admin.pem 证书内容嵌入到生成的kubectl.kubeconfig 文件中(不加时,写入的是证书文件路径);

4.4 分发 kubeclt , 证书私钥和kubeconfig 文件

# 编写分发脚本
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_kubectl_config.sh
#!/usr/bin/env bash
# Distribute kubectl, the admin cert/key pair and the kubeconfig to every master node.
# Usage: scp_kubectl_config.sh <master_ip> [<master_ip> ...]
MASTER_IPS=("$@")   # accept any number of nodes (was hard-coded to exactly 3)
for master_ip in "${MASTER_IPS[@]}"; do   # quoted expansion: safe under IFS changes / empty args
    echo ">>> ${master_ip}"
    scp /root/kubernetes/server/bin/kubectl "root@${master_ip}:/opt/k8s/bin/"
    ssh "root@${master_ip}" "chmod +x /opt/k8s/bin/*"
    scp /opt/k8s/cert/admin*.pem "root@${master_ip}:/opt/k8s/cert/"
    scp /root/.kube/config "root@${master_ip}:/root/.kube/config"
done

# 执行脚本, 注意传参!
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_kubectl_config.sh 192.168.2.201 192.168.2.202 192.168.2.203

5. 部署master节点

下载二进制文件, 想办法!!!

# 下载二进制文件
[root@k8s-master01 ~]# wget https://dl.k8s.io/v1.17.0/kubernetes-server-linux-amd64.tar.gz
# 解压
[root@k8s-master01 ~]# tar -xvf kubernetes-server-linux-amd64.tar.gz

将二进制文件拷贝到所有 master 节点

# 编写脚本
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_master.sh
#!/usr/bin/env bash
# Copy the control-plane binaries to every master node.
# Usage: scp_master.sh <master_ip> [<master_ip> ...]
MASTER_IPS=("$@")   # accept any number of nodes (was hard-coded to exactly 3)
for master_ip in "${MASTER_IPS[@]}"; do   # quoted expansion: safe under IFS changes / empty args
    echo ">>> ${master_ip}"
    scp /root/kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler} "root@${master_ip}:/opt/k8s/bin/"
    ssh "root@${master_ip}" "chmod +x /opt/k8s/bin/*"
done

# 执行, 注意传参!
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_master.sh 192.168.2.201 192.168.2.202 192.168.2.203

5.1 部署高可用组件

5.1.1 在 master 节点分别安装haproxy, keepalived,并配置haproxy 配置文件

# 安装haproxy, keepalived
[root@k8s-master01 ~]# yum install keepalived haproxy -y

# 配置haproxy 配置文件
[root@k8s-master01 ~]# vi /etc/haproxy/haproxy.cfg 
global
    log /dev/log local0
    log /dev/log local1 notice
    chroot /var/lib/haproxy
    stats socket /var/run/haproxy-admin.sock mode 660 level admin
    stats timeout 30s
    user haproxy
    group haproxy
    daemon
    nbproc 1
defaults
    log global
    timeout connect 5000
    timeout client 10m
    timeout server 10m
listen admin_stats
    bind 0.0.0.0:10080
    mode http
    log 127.0.0.1 local0 err
    stats refresh 30s
    stats uri /status
    stats realm welcome login\ Haproxy
    stats auth haproxy:123456
    stats hide-version
    stats admin if TRUE
listen k8s-master
    bind 0.0.0.0:8443
    mode tcp
    option tcplog
    balance source
    server 192.168.2.201 192.168.2.201:6443 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.2.202 192.168.2.202:6443 check inter 2000 fall 2 rise 2 weight 1
    server 192.168.2.203 192.168.2.203:6443 check inter 2000 fall 2 rise 2 weight 1

设置开机启动服务, 并开启三个 master 节点的 haproxy 服务

systemctl enable haproxy && systemctl restart haproxy && systemctl status haproxy
# 检查运行是否正常, 任意master节点执行!
[root@k8s-master01 ~]# netstat -lnpt | grep haproxy
# 输出类似:
Active: active (running) since Tue 2019-11-12 01:54:41 CST; 543ms ago
tcp        0      0 0.0.0.0:8443            0.0.0.0:*               LISTEN      4995/haproxy        
tcp        0      0 0.0.0.0:10080           0.0.0.0:*               LISTEN      4995/haproxy   

5.1.2 配置和启动 keepalived 服务

在192.168.2.201 master主服务的配置文件:

[root@k8s-master01 ~]# vi /etc/keepalived/keepalived.conf
global_defs {
    router_id keepalived_201  # ......
}
vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -30
}
vrrp_instance VI-k8s-master {
    state MASTER
    priority 120    # 第一台从数值 -10, 以此类推!
    dont_track_primary
    interface eth0
    virtual_router_id 201  # ......
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        192.168.2.210
    }
}

在192.168.2.202, 192.168.2.203两台backup 服务的配置文件:

[root@k8s-master02 ~]# vi /etc/keepalived/keepalived.conf

global_defs {
        router_id keepalived_202    # 对应设置
}
vrrp_script check-haproxy {
        script "killall -0 haproxy"
        interval 5
        weight -30
}
vrrp_instance VI-k8s-master {
        state BACKUP
        priority 110   # 第2台从数值 -10
        dont_track_primary
        interface eth0
        virtual_router_id 202  # 对应设置
        advert_int 3
        track_script {
        check-haproxy
        }
        virtual_ipaddress {
            192.168.2.210
        }
}

设置开机启动服务, 并开启三个 master 节点的 keepalived 服务

systemctl enable keepalived && systemctl restart keepalived && systemctl status keepalived
[root@k8s-master01 ~]# ip addr
...
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
    link/ether 00:15:5d:00:68:05 brd ff:ff:ff:ff:ff:ff
    inet 192.168.2.201/24 brd 192.168.2.255 scope global noprefixroute eth0
       valid_lft forever preferred_lft forever
    inet 192.168.2.210/32 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::f726:9d22:2b89:694c/64 scope link noprefixroute 
       valid_lft forever preferred_lft forever
...

5.1.6 查看 haproxy 状态页面

5.2 部署 kube-apiserver 组件

下载二进制文件

5.2.1 创建 kube-apiserver证书和私钥

创建证书签名请求

[root@k8s-master01 ~]# cat > /opt/k8s/cert/kube-apiserver-csr.json <<EOF
{
    "CN": "kubernetes",
    "hosts": [
        "127.0.0.1",
        "10.96.0.1",
        "192.168.2.210",
        "192.168.2.201",
        "192.168.2.202",
        "192.168.2.203",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
          "C": "CN",
          "ST": "BeiJing",
          "L": "BeiJing",
          "O": "k8s",
          "OU": "steams"
        }
    ]
}
EOF

生成证书和私钥

[root@k8s-master01 ~]# cfssl gencert \
-ca=/opt/k8s/cert/ca.pem \
-ca-key=/opt/k8s/cert/ca-key.pem \
-config=/opt/k8s/cert/ca-config.json \
-profile=kubernetes /opt/k8s/cert/kube-apiserver-csr.json | cfssljson -bare /opt/k8s/cert/kube-apiserver

[root@k8s-master01 ~]# ls /opt/k8s/cert/kube-apiserver*
kube-apiserver.csr      kube-apiserver-csr.json  kube-apiserver-key.pem  kube-apiserver.pem 

5.2.2 手动生成 token 并保存在 /opt/k8s/bootstrap-token.csv 里

# 生成 token
[root@k8s-master01 ~]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
fb8f04963e38858eab0867e8d2296d6b

# 保存
[root@k8s-master01 ~]# vi /opt/k8s/bootstrap-token.csv
fb8f04963e38858eab0867e8d2296d6b,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

或者:

# 生成, 并写入文件
[root@k8s-master01 ~]# echo "`head -c 16 /dev/urandom | od -An -t x | tr -d ' '`,kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > /opt/k8s/bootstrap-token.csv

# 查看
[root@k8s-master01 ~]# cat /opt/k8s/bootstrap-token.csv

5.2.3 将生成的证书和私钥、加密配置文件和 bootstrap-token.csv 传送到 所有 master 节点的 /opt/k8s 目录下

# 编写传送脚本
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_apiserver.sh
#!/usr/bin/env bash
# Distribute the kube-apiserver cert/key pair and the bootstrap token file
# to every master node.
# Usage: scp_apiserver.sh <master_ip> [<master_ip> ...]
MASTER_IPS=("$@")   # accept any number of nodes (was hard-coded to exactly 3)
for master_ip in "${MASTER_IPS[@]}"; do   # quoted expansion: safe under IFS changes / empty args
    echo ">>> ${master_ip}"
    scp /opt/k8s/cert/kube-apiserver*.pem "root@${master_ip}:/opt/k8s/cert/"
    scp /opt/k8s/bootstrap-token.csv "root@${master_ip}:/opt/k8s/"
done

# 执行脚本, 注意传参!
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_apiserver.sh 192.168.2.201 192.168.2.202 192.168.2.203

5.2.4 为所有 master 节点分别创建 kube-apiserver 的 systemd unit 文件

[root@k8s-master01 ~]# vi /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service

[Service]
ExecStart=/opt/k8s/bin/kube-apiserver \
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds  \
--allow-privileged=true \
--anonymous-auth=false \
--authorization-mode=Node,RBAC \
--advertise-address=##MASTER_IP## \
--bind-address=##MASTER_IP## \
--secure-port=6443 \
--service-cluster-ip-range=10.96.0.0/16 \
--service-node-port-range=30000-50000 \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/k8s/bootstrap-token.csv \
--client-ca-file=/opt/k8s/cert/ca.pem \
--runtime-config=api/all=true \
--service-account-key-file=/opt/k8s/cert/ca-key.pem \
--tls-cert-file=/opt/k8s/cert/kube-apiserver.pem \
--tls-private-key-file=/opt/k8s/cert/kube-apiserver-key.pem \
--kubelet-https=true \
--kubelet-certificate-authority=/opt/k8s/cert/ca.pem \
--kubelet-client-certificate=/opt/k8s/cert/kube-apiserver.pem \
--kubelet-client-key=/opt/k8s/cert/kube-apiserver-key.pem \
--etcd-cafile=/opt/k8s/cert/ca.pem \
--etcd-certfile=/opt/k8s/cert/kube-apiserver.pem \
--etcd-keyfile=/opt/k8s/cert/kube-apiserver-key.pem \
--etcd-servers=https://192.168.2.201:2379,https://192.168.2.202:2379,https://192.168.2.203:2379 \
--apiserver-count=3 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/log/kube-apiserver-audit.log \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/opt/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

5.2.5 各个 master 节点启动并检查 kube-apiserver 服务

# 启动
systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver && systemctl status kube-apiserver

5.2.6 授予 kubernetes 证书访问 kubelet API 的权限 (待定!!!)

[root@k8s-master01 ~]# kubectl create clusterrolebinding kube-apiserver:kubelet-apis \
--clusterrole=system:kubelet-api-admin \
--user=kubernetes

5.2.7 检查, 验证集群

[root@k8s-master01 ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.2.210:8443

[root@k8s-master01 ~]# kubectl get all --all-namespaces
NAMESPACE   NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
default     service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   49m

# 6443: 接收 https 请求的安全端口,对所有请求做认证和授权;
[root@k8s-master01 ~]# ss -nutlp | grep apiserver
tcp    LISTEN   0        128         192.168.2.201:6443           0.0.0.0:*      users:(("kube-apiserver",pid=3342,fd=8))                                       
tcp    LISTEN   0        128             127.0.0.1:8080           0.0.0.0:*      users:(("kube-apiserver",pid=3342,fd=7))  

5.3 部署高可用kube-controller-manager 集群

准备工作:下载kube-controller-manager二进制文件(包含在kubernetes-server包里, 已解压发送)

5.3.1 创建 kube-controller-manager 的证书和私钥

创建证书签名请求:

[root@k8s-master01 ~]# cat > /opt/k8s/cert/kube-controller-manager-csr.json <<EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
        "127.0.0.1",
        "192.168.2.201",
        "192.168.2.202",
        "192.168.2.203",
        "localhost"
    ],
    "names": [
        {
            "C": "CN",
            "ST": "BeiJing",
            "L": "BeiJing",
            "O": "system:kube-controller-manager",
            "OU": "steams"
        }
    ]
}
EOF

生成证书和私钥

[root@k8s-master01 ~]# cfssl gencert \
-ca=/opt/k8s/cert/ca.pem \
-ca-key=/opt/k8s/cert/ca-key.pem \
-config=/opt/k8s/cert/ca-config.json \
-profile=kubernetes /opt/k8s/cert/kube-controller-manager-csr.json | cfssljson -bare /opt/k8s/cert/kube-controller-manager

# 查看证书
[root@k8s-master01 ~]# ls /opt/k8s/cert/kube-controller-manager*
kube-controller-manager.csr       kube-controller-manager-csr.json  kube-controller-manager-key.pem   kube-controller-manager.pem  

5.3.2 创建 kube-controller-manager.kubeconfig 文件

### --kubeconfig:指定kubeconfig文件路径与文件名;如果不设置,默认生成在~/.kube/config文件。
### 后面需要用到此文件,所以我们把配置信息单独指向到指定文件中
# step.1 设置集群参数:
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/cert/ca.pem \
--embed-certs=true \
--server=https://192.168.2.210:8443 \
--kubeconfig=/opt/k8s/kube-controller-manager.kubeconfig

# step.2 设置客户端认证参数
[root@k8s-master01 ~]# kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/opt/k8s/cert/kube-controller-manager.pem \
--client-key=/opt/k8s/cert/kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/opt/k8s/kube-controller-manager.kubeconfig

# step.3 设置上下文参数
[root@k8s-master01 ~]# kubectl config set-context system:kube-controller-manager@kubernetes \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/opt/k8s/kube-controller-manager.kubeconfig

# tep.4 设置默认上下文
[root@k8s-master01 ~]# kubectl config use-context system:kube-controller-manager@kubernetes \
--kubeconfig=/opt/k8s/kube-controller-manager.kubeconfig

5.3.3 分发生成的证书和私钥、kubeconfig 到所有 master 节点

# 编写分发脚本
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_controller-manager.sh
#!/usr/bin/env bash
# Distribute the kube-controller-manager cert/key pair and kubeconfig
# to every master node.
# Usage: scp_controller-manager.sh <master_ip> [<master_ip> ...]
MASTER_IPS=("$@")   # accept any number of nodes (was hard-coded to exactly 3)
for master_ip in "${MASTER_IPS[@]}"; do   # quoted expansion: safe under IFS changes / empty args
    echo ">>> ${master_ip}"
    scp /opt/k8s/cert/kube-controller-manager*.pem "root@${master_ip}:/opt/k8s/cert/"
    scp /opt/k8s/kube-controller-manager.kubeconfig "root@${master_ip}:/opt/k8s/"
done

# 执行, 注意传参!
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_controller-manager.sh 192.168.2.201 192.168.2.202 192.168.2.203

5.3.4 为所有 master 节点分别创建 kube-controller-manager 的 systemd unit 文件

[root@k8s-master01 ~]# vi /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=kube-apiserver.service

[Service]
ExecStart=/opt/k8s/bin/kube-controller-manager \
--address=127.0.0.1 \
--port=10252 \
--bind-address=0.0.0.0 \
--secure-port=10257 \
--cluster-name=kubernetes \
--allocate-node-cidrs=true \
--service-cluster-ip-range=10.96.0.0/16 \
--authentication-kubeconfig=/opt/k8s/kube-controller-manager.kubeconfig \
--authorization-kubeconfig=/opt/k8s/kube-controller-manager.kubeconfig \
--kubeconfig=/opt/k8s/kube-controller-manager.kubeconfig \
--root-ca-file=/opt/k8s/cert/ca.pem \
--use-service-account-credentials=true \
--service-account-private-key-file=/opt/k8s/cert/ca-key.pem \
--cluster-signing-cert-file=/opt/k8s/cert/ca.pem \
--cluster-signing-key-file=/opt/k8s/cert/ca-key.pem \
--experimental-cluster-signing-duration=876000h \
--client-ca-file=/opt/k8s/cert/ca.pem \
--requestheader-client-ca-file=/opt/k8s/cert/ca.pem \
--leader-elect=true \
--feature-gates=RotateKubeletServerCertificate=true \
--controllers=*,bootstrapsigner,tokencleaner \
--horizontal-pod-autoscaler-use-rest-clients=true \
--horizontal-pod-autoscaler-sync-period=10s \
--tls-cert-file=/opt/k8s/cert/kube-controller-manager.pem \
--tls-private-key-file=/opt/k8s/cert/kube-controller-manager-key.pem \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/opt/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

5.3.5 为各 master 节点设置开机启动, 并检查启动 kube-controller-manager

systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl start kube-controller-manager && systemctl status kube-controller-manager
# 查看输出的 metric
[root@k8s-master03 ~]# ss -nutlp |grep kube-controll
tcp    LISTEN   0        128             127.0.0.1:10252          0.0.0.0:*      users:(("kube-controller",pid=3951,fd=6))                                      
tcp    LISTEN   0        128                     *:10257                *:*      users:(("kube-controller",pid=3951,fd=7)) 

# 停掉一个或两个节点的 kube-controller-manager 服务,观察其它节点的日志,看是否获取了 leader 权限。
# 查看当前的 leader 
[root@k8s-master02 ~]# kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml

5.4 部署高可用 kube-scheduler 集群

准备工作:下载kube-scheduler 的二进制文件---^^^

5.4.1 创建 kube-scheduler 证书和私钥

创建证书签名请求:

[root@k8s-master01 ~]# cat > /opt/k8s/cert/kube-scheduler-csr.json <<EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
      "127.0.0.1",
      "192.168.2.201",
      "192.168.2.202",
      "192.168.2.203",
      "localhost"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "system:kube-scheduler",
        "OU": "steams"
      }
    ]
}
EOF

生成证书和私钥

[root@k8s-master01 ~]# cfssl gencert \
-ca=/opt/k8s/cert/ca.pem \
-ca-key=/opt/k8s/cert/ca-key.pem \
-config=/opt/k8s/cert/ca-config.json \
-profile=kubernetes /opt/k8s/cert/kube-scheduler-csr.json | cfssljson -bare /opt/k8s/cert/kube-scheduler

# 查看证书
[root@k8s-master01 ~]# ls /opt/k8s/cert/kube-scheduler*
kube-scheduler.csr       kube-scheduler-csr.json  kube-scheduler-key.pem   kube-scheduler.pem  

5.4.2 创建 kube-scheduler.kubeconfig 文件

# step.1 设置集群参数
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/cert/ca.pem \
--embed-certs=true \
--server=https://192.168.2.210:8443 \
--kubeconfig=/opt/k8s/kube-scheduler.kubeconfig

# step.2 设置客户端认证参数
[root@k8s-master01 ~]# kubectl config set-credentials system:kube-scheduler \
--client-certificate=/opt/k8s/cert/kube-scheduler.pem \
--client-key=/opt/k8s/cert/kube-scheduler-key.pem \
--embed-certs=true  \
--kubeconfig=/opt/k8s/kube-scheduler.kubeconfig

# step.3 设置上下文参数
[root@k8s-master01 ~]# kubectl config set-context system:kube-scheduler@kubernetes \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/opt/k8s/kube-scheduler.kubeconfig

# step.4设置默认上下文
[root@k8s-master01 ~]# kubectl config use-context system:kube-scheduler@kubernetes \
--kubeconfig=/opt/k8s/kube-scheduler.kubeconfig

5.4.3 分发生成的证书和私钥、kubeconfig 到所有 master 节点

# 编写分发脚本
[root@k8s-master01 ~]# vi /opt/k8s/script/scp_scheduler.sh
#!/usr/bin/env bash
# Distribute the kube-scheduler cert/key pair and kubeconfig to every master node.
# Usage: scp_scheduler.sh <master_ip> [<master_ip> ...]
MASTER_IPS=("$@")   # accept any number of nodes (was hard-coded to exactly 3)
for master_ip in "${MASTER_IPS[@]}"; do   # quoted expansion: safe under IFS changes / empty args
    echo ">>> ${master_ip}"
    scp /opt/k8s/cert/kube-scheduler*.pem "root@${master_ip}:/opt/k8s/cert/"
    scp /opt/k8s/kube-scheduler.kubeconfig "root@${master_ip}:/opt/k8s/"
done

# 执行脚本, 注意传参!
[root@k8s-master01 ~]# bash /opt/k8s/script/scp_scheduler.sh 192.168.2.201 192.168.2.202 192.168.2.203

5.4.4 为各个 master 节点分别创建kube-scheduler 的 systemd unit 文件

[root@k8s-master01 ~]# vi /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=kube-apiserver.service

[Service]
ExecStart=/opt/k8s/bin/kube-scheduler \
--address=127.0.0.1 \
--port=10251 \
--bind-address=0.0.0.0 \
--secure-port=10259 \
--kubeconfig=/opt/k8s/kube-scheduler.kubeconfig \
--client-ca-file=/opt/k8s/cert/ca.pem \
--requestheader-client-ca-file=/opt/k8s/cert/ca.pem \
--tls-cert-file=/opt/k8s/cert/kube-scheduler.pem \
--tls-private-key-file=/opt/k8s/cert/kube-scheduler-key.pem \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/opt/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

5.4.5 为所有 master 节点设置开机启动, 并启动检查 kube-scheduler 服务

systemctl daemon-reload && systemctl enable kube-scheduler && systemctl start kube-scheduler && systemctl status kube-scheduler
# 查看输出的 metric
[root@k8s-master01 ~]# ss -nutlp |grep kube-scheduler
tcp    LISTEN   0        128             127.0.0.1:10251          0.0.0.0:*      users:(("kube-scheduler",pid=8584,fd=6))                                       
tcp    LISTEN   0        128                     *:10259                *:*      users:(("kube-scheduler",pid=8584,fd=7))   
                                    
[root@k8s-master01 ~]# curl -s http://127.0.0.1:10251/metrics |head
# HELP apiserver_audit_event_total [ALPHA] Counter of audit events generated and sent to the audit backend.
# TYPE apiserver_audit_event_total counter
apiserver_audit_event_total 0
# HELP apiserver_audit_requests_rejected_total [ALPHA] Counter of apiserver requests rejected due to an error in audit logging backend.
# TYPE apiserver_audit_requests_rejected_total counter
apiserver_audit_requests_rejected_total 0
# HELP apiserver_client_certificate_expiration_seconds [ALPHA] Distribution of the remaining lifetime on the certificate used to authenticate a request.
# TYPE apiserver_client_certificate_expiration_seconds histogram
apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0
apiserver_client_certificate_expiration_seconds_bucket{le="1800"} 0

# 停掉一个或两个节点的 kube-scheduler 服务,观察其它节点的日志,看是否获取了 leader 权限。
# 查看当前的 leader 
[root@k8s-master02 ~]# kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml

master 节点基本组件已部署完毕!

# 集群健康检查
kubectl get cs

:

上一篇下一篇

猜你喜欢

热点阅读