K8S入门系列之集群二进制部署--> node篇(二)

2019-12-12  本文已影响0人  迷失的风儿

node节点组件 (v1.17)

kubernetes-server-linux-amd64.tar.gz(相关的这里都能找到二进制文件!)

1. 系统初始化

1.1 系统环境

[root@localhost ~]# cat /etc/redhat-release 
CentOS Linux release 8.0.1905 (Core) 

1.2 修改各个节点的对应hostname, 并分别写入/etc/hosts

# Name this node (repeat on each node with its own name: k8s-node01, k8s-node02, ...)
hostnamectl set-hostname k8s-node01

# Append (never overwrite) the cluster name-resolution entries to /etc/hosts
tee -a /etc/hosts >/dev/null <<EOF
192.168.2.201 k8s-master01
192.168.2.202 k8s-master02
192.168.2.203 k8s-master03
192.168.2.11 k8s-node01
192.168.2.12 k8s-node02
EOF

1.3 安装依赖包和常用工具

# Base dependencies and common tools; chrony provides time sync,
# ipvsadm/ipset/conntrack are needed for kube-proxy's IPVS mode.
yum install -y \
  wget vim yum-utils net-tools tar chrony curl jq \
  ipvsadm ipset conntrack iptables sysstat libseccomp

1.4 所有节点关闭firewalld, selinux以及swap

# Disable firewalld now and on boot, then flush all filter/nat chains
systemctl disable --now firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
# Default-accept forwarded traffic.
# Fix: the original read "ACCEP" — an invalid target, so the command failed.
iptables -P FORWARD ACCEPT

# Disable SELinux immediately (setenforce 0) and persistently via the config
# file; SELINUX=disabled only takes full effect after a reboot.
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

# Disable swap now and comment out the swap line in /etc/fstab so it stays
# off after reboot (kubelet refuses to start with swap enabled by default).
swapoff -a && sed -i '/ swap / s/^\(.*\)$/# \1/g' /etc/fstab

1.5 所有节点设置时间同步

# Use the Asia/Shanghai timezone and keep the hardware clock in UTC
timedatectl set-timezone Asia/Shanghai
timedatectl set-local-rtc 0

# Enable chronyd at boot and (re)start it so the clock stays in sync via NTP
systemctl enable chronyd && systemctl restart chronyd

1.6 调整内核参数, k8s必备参数!

# Load the bridge netfilter module (required before setting the
# net.bridge.bridge-nf-call-* sysctls below)
modprobe br_netfilter
# Fix: persist the module across reboots; without this, applying the sysctl
# file at boot fails because the bridge-nf keys do not exist yet.
echo 'br_netfilter' > /etc/modules-load.d/br_netfilter.conf

# Kernel parameters required by Kubernetes
cat> /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max = 6553500
net.nf_conntrack_max = 6553500
net.ipv4.tcp_max_tw_buckets = 4096
EOF

# Apply the settings immediately
sysctl -p /etc/sysctl.d/kubernetes.conf

1.6 kube-proxy 开启 ipvs 的前置条件

# Script that loads every kernel module kube-proxy's IPVS mode needs
cat> /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
# Fix: nf_conntrack_ipv4 was merged into nf_conntrack on newer kernels
# (RHEL/CentOS 8 included) — fall back so the script works on both.
modprobe -- nf_conntrack_ipv4 || modprobe -- nf_conntrack
EOF

# Make the script executable, run it, and verify the modules are loaded
# (grep for nf_conntrack also matches nf_conntrack_ipv4 on older kernels)
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

1.7 在每台node上预建目录

# Pre-create directories for binaries, certificates, and component work dirs
mkdir -p /opt/k8s/{bin,cert}
mkdir -p /opt/lib/{kubelet,kube-proxy}
# Fix: the kubelet/kube-proxy units below log to --log-dir=/opt/log/kubernetes,
# which was never created — create it here.
mkdir -p /opt/log/kubernetes

# Add /opt/k8s/bin to PATH for all future logins.
# Fix: the original `sh -c "echo '...'"` used double quotes, so $PATH/$HOME/
# $JAVA_HOME were expanded at WRITE time, baking the current values into the
# file. Single quotes keep them literal so they expand at login time.
echo 'PATH=/opt/k8s/bin:$PATH:$HOME/bin:$JAVA_HOME/bin' >> /etc/profile.d/k8s.sh
# Make the new PATH effective in the current shell
source /etc/profile.d/k8s.sh

2. 部署安装 docker

2.1 yum 安装 docker

# Add the upstream Docker CE yum repository
yum-config-manager  --add-repo   https://download.docker.com/linux/centos/docker-ce.repo

# Install a matching containerd.io (the el7 build — CentOS 8's repo lacked
# a containerd.io package at the time, but the el7 RPM works)
yum install https://download.docker.com/linux/centos/7/x86_64/stable/Packages/containerd.io-1.2.6-3.3.el7.x86_64.rpm

# Install docker-ce (19.03 at time of writing) from the repo just added
yum install docker-ce

2.2 docker配置文件修改

2.3 设置开机启动, 并启动验证 docker 服务

# Enable docker at boot, reload unit files, start it, and confirm it is running
systemctl enable docker && systemctl daemon-reload && systemctl restart docker && systemctl status docker

3. 部署 kubelet 组件

3.1 下载二进制kubelet文件

kubernetes-server-linux-amd64.tar.gz    # 我在这里, 在这里!  里面有 kubectl 二进制单文件工具!

# 传送过去, 顺便也把 kube-proxy 也传送过去!
[root@k8s-master01 ~]# scp /root/kubernetes/server/bin/{kubelet,kube-proxy} root@k8s-node01:/opt/k8s/bin/

3.2 在 master 节点上创建角色绑定

[root@k8s-master01 ~]# kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

--user=kubelet-bootstrap 是部署kube-apiserver时创建bootstrap-token.csv文件中指定的用户,同时也需要写入bootstrap.kubeconfig 文件

3.3 在 master 节点上为要加入的 node 创建 kubelet-bootstrap.kubeconfig 文件

# 设置集群参数
[root@k8s-master01 ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/cert/ca.pem \
--embed-certs=true \
--server=https://192.168.2.210:8443 \
--kubeconfig=/opt/k8s/kubelet-bootstrap.kubeconfig

# 设置客户端认证参数
### token 是前文提到的 bootstrap-token.csv 文件中的 token 值
[root@k8s-master01 ~]# kubectl config set-credentials kubelet-bootstrap \
--token=23f6d5b6ddb2779c048ef13197d4aa2b \
--kubeconfig=/opt/k8s/kubelet-bootstrap.kubeconfig

# 设置上下文参数
[root@k8s-master01 ~]# kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=/opt/k8s/kubelet-bootstrap.kubeconfig

# 设置默认上下文
 [root@k8s-master01 ~]# kubectl config use-context default \
--kubeconfig=/opt/k8s/kubelet-bootstrap.kubeconfig

传送相关所需文件

# 把 ca 证书和私钥传送给 node 节点
scp /opt/k8s/cert/ca*.pem root@k8s-node01:/opt/k8s/cert/

# 把生成的 kubelet-bootstrap.kubeconfig 传送给 node 节点 (可复用!)
scp /opt/k8s/kubelet-bootstrap.kubeconfig root@k8s-node01:/opt/k8s/

3.4 在 node 节点上创建对应的 kubelet 的 systemd unit 文件

[root@k8s-node01 ~]# vi /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/opt/lib/kubelet
ExecStart=/opt/k8s/bin/kubelet \
--bootstrap-kubeconfig=/opt/k8s/kubelet-bootstrap.kubeconfig \
--cert-dir=/opt/k8s/cert/  \
--kubeconfig=/opt/k8s/kubelet.kubeconfig \
--anonymous-auth=false \
--authorization-mode=Webhook \
--authentication-token-webhook=true \
--client-ca-file=/opt/k8s/cert/ca.pem \
--address=##NODE_IP## \
--cgroup-driver=cgroupfs \
--image-pull-progress-deadline=300s \
--node-labels=node.kubernetes.io/k8s-node=true \
--cluster-dns=10.96.0.2 \
--cluster-domain=cluster.local \
--node-ip=##NODE_IP## \
--port=10250 \
--hostname-override=##NODE_NAME## \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 \
--network-plugin=cni \
--cni-conf-dir=/opt/cni/net.d \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/opt/log/kubernetes \
--v=2
Restart=on-failure
RestartSec=5
KillMode=process

[Install]
WantedBy=multi-user.target

3.6 设置开机启动, 并启动并检查 kubelet 服务

# Reload unit files, enable kubelet at boot, start it, and check its status
systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet && systemctl status kubelet 

3.7 在 master 上批准kubelet 的 TLS 证书请求

手动 approve csr 请求

# 查看 CSR 列表:
[root@k8s-master01 ~]# kubectl get csr
NAME                                                   AGE   REQUESTOR           CONDITION
node-csr--LyhqMsoBZHufxq_PNLzryNXUZGHhGy1sbdclP6pPoE   12m   kubelet-bootstrap   Pending


# 手动approve csr:
[root@k8s-master01 ~]# kubectl certificate approve node-csr--LyhqMsoBZHufxq_PNLzryNXUZGHhGy1sbdclP6pPoE
certificatesigningrequest.certificates.k8s.io/node-csr--LyhqMsoBZHufxq_PNLzryNXUZGHhGy1sbdclP6pPoE approved


# 查看 approve 结果:
[root@k8s-master01 ~]# kubectl get csr
NAME        AGE   REQUESTOR           CONDITION
csr-wzzm5   61s   kubelet-bootstrap   Approved

自动 approve csr 请求!

[root@k8s-master01 ~]# cat > /opt/k8s/csr-crb.yaml <<EOF
 # Approve all CSRs for the group "system:bootstrappers"
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: auto-approve-csrs-for-group
 subjects:
 - kind: Group
   name: system:bootstrappers
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
   apiGroup: rbac.authorization.k8s.io
---
 # To let a node of the group "system:nodes" renew its own credentials
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: node-client-cert-renewal
 subjects:
 - kind: Group
   name: system:nodes
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
   apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: approve-node-server-renewal-csr
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
---
 # To let a node of the group "system:nodes" renew its own server credentials
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: node-server-cert-renewal
 subjects:
 - kind: Group
   name: system:nodes
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: approve-node-server-renewal-csr
   apiGroup: rbac.authorization.k8s.io
EOF

生效配置:

[root@k8s-master01 ~]# kubectl apply -f /opt/k8s/csr-crb.yaml
clusterrolebinding.rbac.authorization.k8s.io/auto-approve-csrs-for-group created
clusterrolebinding.rbac.authorization.k8s.io/node-client-cert-renewal created
clusterrole.rbac.authorization.k8s.io/approve-node-server-renewal-csr created
clusterrolebinding.rbac.authorization.k8s.io/node-server-cert-renewal created

4. 部署 kube-proxy 组件

4.1 下载 kube-proxy 二进制文件

kubernetes-server-linux-amd64.tar.gz    # 我在这里, 在这里!  里面有 kube-proxy 二进制单文件工具!

4.2 在 master 节点上创建 kube-proxy 证书

在 master 节点上创建证书请求文件

[root@k8s-master01 ~]# cat > /opt/k8s/cert/kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "steams"
    }
  ]
}
EOF

在 master 节点上生成证书和私钥

[root@k8s-master01 ~]# cfssl gencert \
-ca=/opt/k8s/cert/ca.pem \
-ca-key=/opt/k8s/cert/ca-key.pem \
-config=/opt/k8s/cert/ca-config.json \
-profile=kubernetes /opt/k8s/cert/kube-proxy-csr.json | cfssljson -bare /opt/k8s/cert/kube-proxy

# 查看证书
[root@k8s-master01 ~]# ls /opt/k8s/cert/kube-proxy*

4.3 在 master 节点上创建kube-proxy.kubeconfig 文件

## 配置集群参数
[root@kube-master ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/k8s/cert/ca.pem \
--embed-certs=true \
--server=https://192.168.2.210:8443 \
--kubeconfig=/opt/k8s/kube-proxy.kubeconfig

## 配置客户端认证参数
[root@kube-master ~]# kubectl config set-credentials kube-proxy \
--client-certificate=/opt/k8s/cert/kube-proxy.pem \
--client-key=/opt/k8s/cert/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/opt/k8s/kube-proxy.kubeconfig

## 配置集群上下文
[root@kube-master ~]# kubectl config set-context kube-proxy@kubernetes \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=/opt/k8s/kube-proxy.kubeconfig

## 配置集群默认上下文
[root@kube-master ~]# kubectl config use-context kube-proxy@kubernetes \
--kubeconfig=/opt/k8s/kube-proxy.kubeconfig

传送相关所需文件

# 传送证书和私钥至node节点
[root@k8s-master01 ~]# scp /opt/k8s/cert/kube-proxy*.pem root@k8s-node01:/opt/k8s/cert/

# 传送kube-proxy.kubeconfig至node节点
[root@k8s-master01 ~]# scp /opt/k8s/kube-proxy.kubeconfig root@k8s-node01:/opt/k8s/

4.5 在 node 节点上创建对应的 kube-proxy 配置文件

[root@k8s-node01 ~]# vi /opt/k8s/kube-proxy.config.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: ##NODE_IP##
clientConnection:
  kubeconfig: /opt/k8s/kube-proxy.kubeconfig
clusterCIDR: 10.96.0.0/16
healthzBindAddress: ##NODE_IP##:10256
hostnameOverride: ##NODE_NAME##
kind: KubeProxyConfiguration
metricsBindAddress: ##NODE_IP##:10249
mode: "ipvs"

4.6 在 node 节点上创建 kube-proxy 的 systemd unit 文件

[root@k8s-node01 ~]# vi /etc/systemd/system/kube-proxy.service 
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=/opt/lib/kube-proxy
ExecStart=/opt/k8s/bin/kube-proxy \
  --config=/opt/k8s/kube-proxy.config.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/opt/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

4.7 设置开机启动, 并启动并检查 kube-proxy 服务

# Reload unit files, enable kube-proxy at boot, start it, and check its status
systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy && systemctl status kube-proxy

4.8 在node节点上-->查看 ipvs 路由规则

[root@k8s-node01 ~]# /usr/sbin/ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.96.0.1:443 rr
  -> 192.168.2.201:6443           Masq    1      0          0         
  -> 192.168.2.202:6443           Masq    1      0          0         
  -> 192.168.2.203:6443           Masq    1      0          0     

node 节点基本组件部署完毕

上一篇 下一篇

猜你喜欢

热点阅读