Ubuntu20.04LTS 搭建kubernetes集群环境

2021-08-04  本文已影响0人  攻城老狮
  1. 更换清华源
# 备份源
sudo mv /etc/apt/sources.list /etc/apt/sources.list.bak

# 更改源
cd /etc/apt/
sudo vim sources.list

# 添加
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ focal main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ focal-updates main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ focal-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu-ports/ focal-proposed main restricted universe multiverse

# 更新源
sudo apt-get update

# 安装必要插件
sudo apt-get install vim
sudo apt-get install net-tools
sudo apt-get install openssh-server
  2. root用户配置
# 修改root用户密码
sudo passwd root

# 进入root用户
su

# 允许远程登录root
vim /etc/ssh/sshd_config

PermitRootLogin yes

# 重启服务
service ssh restart
  3. 前置配置
# 1.交换分区关闭
sudo swapoff -a

# 避免开启启动交换空间
注释 /etc/fstab 的 swap

# 查看是否关闭成功
free -h

# 2.关闭防火墙
ufw disable

# 3.修改时间同步
sudo timedatectl set-timezone Asia/Shanghai
sudo systemctl restart rsyslog 

# 4.确保每个机器不会自动suspend
sudo systemctl mask sleep.target suspend.target hibernate.target hybrid-sleep.target

# 5.设置iptables可以看到bridged traffic
# 先确认Linux内核加载了br_netfilter模块
lsmod | grep br_netfilter
# 确保sysctl配置中net.bridge.bridge-nf-call-iptables的值设置为了1,同时调整k8s的swappiness参数
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
sudo sysctl --system

# 6.修改/etc/sysctl.d/10-network-security.conf
sudo vim /etc/sysctl.d/10-network-security.conf

#将下面两个参数的值从2修改为1
#net.ipv4.conf.default.rp_filter=1
#net.ipv4.conf.all.rp_filter=1

#然后使之生效
sudo sysctl --system
  4. 安装docker
# 安装docker
curl -sSL https://get.daocloud.io/docker | sh 

# 查看是否安装成功
sudo docker version

# 配置镜像加速
sudo vim /etc/docker/daemon.json

{
    "registry-mirrors": [
        "https://registry.docker-cn.com"
    ]
}

# 重启docker服务
sudo systemctl restart docker
sudo systemctl enable docker

# 查看是否配置成功
sudo docker info
  5. 安装kubeadm,kubelet,kubectl
# 更新下载依赖
sudo apt-get update && sudo apt-get install -y ca-certificates curl software-properties-common apt-transport-https
# 添加kubernetes源
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
cat << EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF

# 更新源与安装kube
apt-get update
apt-get install kubeadm=1.17.4-00 kubelet=1.17.4-00 kubectl=1.17.4-00 -y

# 开机自动启动
sudo systemctl enable kubelet

# 查看镜像需要的版本信息
kubeadm config images list

# 下载镜像
images=(
  kube-apiserver:v1.17.17
  kube-controller-manager:v1.17.17
  kube-scheduler:v1.17.17
  kube-proxy:v1.17.17
  pause:3.1
  etcd:3.4.3-0
  coredns:1.6.5
)
for imageName in ${images[@]};do
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
done
  6. 克隆三台虚拟机作为k8s集群(master node1 node2)

  7. 修改三台克隆机的hostname

# 修改主机名
hostnamectl set-hostname k8s-master
# hostnamectl set-hostname k8s-node1
# hostnamectl set-hostname k8s-node2

# 修改cloud
sudo vim /etc/cloud/cloud.cfg

preserve_hostname: true

# 重新登录
  8. 集群初始化
# 创建集群 --apiserver-advertise-address改为master本机ip
kubeadm init --apiserver-advertise-address=10.211.55.8 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12

# 创建必要文件
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubeadm join 10.211.55.8:6443 --token 0x4bqq.dly9q20n28y7kr2h \
    --discovery-token-ca-cert-hash sha256:4066faa9a98c7eb8e4cab7f2e9952ca16c068f3d6a8024c9e2b2e09d0062e436 
  9. 部署网络(master操作)
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml

# 查看集群状态(Ready)
kubectl get nodes

# 新令牌生成方式
kubeadm token create --print-join-command
  10. 服务部署
# 部署nginx
kubectl create deployment nginx --image=nginx
# 暴露端口
kubectl expose deployment nginx --port=80 --type=NodePort
# 查看服务状态
kubectl get pod,svc

NAME                         READY   STATUS              RESTARTS   AGE
pod/nginx-86c57db685-bhm5x   0/1     ContainerCreating   0          18s

NAME                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP        38m
service/nginx        NodePort    10.104.127.211   <none>        80:31985/TCP   8s

# 访问地址查看nginx服务
10.211.55.8:31985
  11. 部署dashboard(k8s可视化界面)
# 1.部署dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml

# 2.设置修改访问的类型
# type: ClusterIP 改为 type: NodePort
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard

# 3.找到外界访问的端口
kubectl get svc -A |grep kubernetes-dashboard

# 4.访问: https://集群任意IP:端口      https://10.211.55.18:30187

# 5.创建访问账号 准备一个yaml文件; vi dash.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard

# 6.部署admin-user
kubectl apply -f dash.yaml

# 7.获取访问令牌
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
image-20220416221129780.png
  12. 卸载k8s
# 1.清理运行到k8s群集中的node
kubectl delete node --all
# 2.停止所有k8s服务
for service in kube-apiserver kube-controller-manager kubelet kube-proxy kube-scheduler;
do
      systemctl stop $service
done
# 3.重置kubeadm
kubeadm reset -f
# 4.删除文件
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*
rm -rf /etc/cni
rm -rf /opt/cni
rm -rf /var/lib/etcd
rm -rf /var/etcd
# 5.卸载依赖
apt-get remove kubeadm=1.17.4-00 kubelet=1.17.4-00 kubectl=1.17.4-00
  13. 卸载docker
# 1.卸载安装时自动安装的所有包
apt-get autoremove docker docker-ce docker-engine  docker.io  containerd runc
# 2.删除docker 其他没有卸载
dpkg -l |grep ^rc|awk '{print $2}' |sudo xargs dpkg -P # 删除无用的相关的配置文件
apt-get autoremove docker-ce-*
# 3.删除相关配置
rm -rf /etc/systemd/system/docker.service.d
rm -rf /var/lib/docker
# 4.验证是否删除
docker --version
上一篇下一篇

猜你喜欢

热点阅读