k8s Setup
2021-11-02
Prepare three or more servers
$ hostname #check the hostname
#Add every server's IP and hostname to /etc/hosts on all machines
echo 10.0.8.1 master-8-1 >>/etc/hosts
echo 10.0.8.11 node-8-11 >>/etc/hosts
echo 10.0.8.12 node-8-12 >>/etc/hosts
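To confirm name resolution works on every machine, a quick check such as the following (using the example hostnames above) is enough:
getent hosts master-8-1 node-8-11 node-8-12 #should print the IPs added above
ping -c 1 node-8-11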
Install kubectl, kubeadm, kubelet
#Update the apt package index and install the packages needed to use the Kubernetes apt repository:
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl
#Download the public signing key
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg
#Add the Kubernetes apt repository
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] http://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
#Update the apt index and install kubelet, kubeadm, kubectl
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
#Pin the installed versions so apt upgrades don't change them
sudo apt-mark hold kubelet kubeadm kubectl
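To confirm the three tools were installed and pinned (output will vary with the repository's current version):
kubeadm version
kubelet --version
kubectl version --client
apt-mark showhold #should list kubelet, kubeadm and kubectl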
Install certificates (alternative repository setup using apt-key)
sudo apt-get update && sudo apt-get install -y ca-certificates curl software-properties-common apt-transport-https
curl -s https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
sudo tee /etc/apt/sources.list.d/kubernetes.list <<EOF
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
Initialize the master and nodes
#Initialize the master node
kubeadm init --pod-network-cidr=172.30.0.0/16 --service-cidr=10.10.10.0/24 \
--token-ttl=0 \
--image-repository registry.aliyuncs.com/google_containers
#Run the following on the master and the nodes, as prompted by kubeadm init
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
#Nodes need a copy of /etc/kubernetes/admin.conf from the master
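One way to copy the file, assuming SSH access from the master and the example hostnames used earlier:
scp /etc/kubernetes/admin.conf root@node-8-11:/etc/kubernetes/admin.conf #repeat for each node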
#Join a node to the master (this command is the last line printed by kubeadm init)
kubeadm join 10.151.30.57:6443 --token 8xomlq.0cdf2pbvjs2gjho3 --discovery-token-ca-cert-hash sha256:92802317cb393682c1d1356c15e8b4ec8af2b8e5143ffd04d8be4eafb5fae368
#If you lose it, or add nodes later, run <kubeadm token create --print-join-command> to generate a fresh token and join command
#Check the master's status:
kubectl get nodes
1. If the node is NotReady:
2. Check the pods: kubectl get pods -A
3. If the coredns pods in kube-system are stuck in Pending:
4. kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml #this takes a while because images need to be pulled; see the verification sketch after this list
#(the config needs to be edited: kubectl edit cm kube-flannel-cfg -n kube-system
{
  "Network": "10.244.0.0/16", ==> "172.30.0.0/16" #the pod CIDR specified when initializing the master
  "Backend": {
    "Type": "vxlan"
  }
}
)
5. If none of the above applies, check the kubelet logs: journalctl -f -u kubelet.service
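A quick way to verify that flannel came up and the node turned Ready (pod names will differ per cluster):
kubectl get pods -A | grep -E 'flannel|coredns' #all should reach Running
kubectl get nodes #STATUS should become Ready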
Check component health
root@master-101-1:~# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Unhealthy Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
etcd-0 Healthy {"health":"true"}
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
vim /etc/kubernetes/manifests/kube-scheduler.yaml
--port=0 #delete this line from each of the two manifests above
systemctl restart kubelet
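Once kubelet restarts the static pods, re-checking should show all components healthy:
kubectl get cs #scheduler and controller-manager should now report Healthy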
Authorize a docker registry secret
If you need to pull images from a private registry, you must configure a secret.
docker login
kubectl create secret docker-registry regcred \
--docker-server={your registry server} \
--docker-username={your username} \
--docker-password={your password} \
--docker-email={your email address}
#This creates a secret named regcred; it is only valid in the current namespace. To target another namespace, add -n {namespace}
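To inspect the secret that was just created:
kubectl get secret regcred --output=yaml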
#Referencing the secret from a Pod
apiVersion: v1
kind: Pod
metadata:
  name: private-reg
spec:
  containers:
  - name: private-reg-container
    image: <your-private-image>
  imagePullSecrets:
  - name: regcred
#See <https://kubernetes.io/zh/docs/tasks/configure-pod-container/pull-image-private-registry/>
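A minimal usage sketch, assuming the manifest above is saved as private-reg-pod.yaml (the filename is arbitrary):
kubectl apply -f private-reg-pod.yaml
kubectl get pod private-reg #should reach Running if the secret and image name are correct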
Configure IP forwarding
echo net.ipv4.ip_forward=1 >> /etc/sysctl.d/k8s.conf
echo net.ipv4.vs.conntrack=1 >> /etc/sysctl.d/k8s.conf
sysctl --system
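The value can be confirmed immediately afterwards:
sysctl net.ipv4.ip_forward #should print net.ipv4.ip_forward = 1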
Set kube-proxy's proxy mode to ipvs
Provide externally accessible IPs for Services (via MetalLB)
#参见<https://metallb.universe.tf/installation/>
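Before switching kube-proxy to ipvs, the ip_vs kernel modules usually need to be loaded on every node (a hedged sketch; these are the standard ip_vs module names, and ipvsadm is optional, used only for inspection):
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack; do sudo modprobe $m; done
sudo apt-get install -y ipvsadm #optional, lets you inspect ipvs rules later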
kubectl edit configmap kube-proxy -n kube-system
# Change mode to ipvs and strictARP to true, then delete all kube-proxy pods
kubectl get pod -n kube-system -o wide |grep kube-proxy | awk '{print $1}' | xargs kubectl delete pod -n kube-system
kubectl get pod -n kube-system -o wide |grep coredns | awk '{print $1}' | xargs kubectl delete pod -n kube-system
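To confirm kube-proxy is now programming ipvs rules (requires the ipvsadm tool mentioned above):
sudo ipvsadm -Ln #should list virtual servers for the cluster Service IPs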
#Install MetalLB
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.11.0/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.11.0/manifests/metallb.yaml
#Layer 2 configuration: set the IP address range for layer2 mode
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  #addresses should be a free IP range in the same network as the node addresses shown by kubectl cluster-info
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.1.240-192.168.1.250
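To apply the config and try it out, a sketch assuming the manifest above is saved as metallb-config.yaml and that the nginx image is pullable from your registry or mirror:
kubectl apply -f metallb-config.yaml
kubectl create deployment nginx-demo --image=nginx
kubectl expose deployment nginx-demo --port=80 --type=LoadBalancer
kubectl get svc nginx-demo #EXTERNAL-IP should be assigned from the 192.168.1.240-250 pool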