kubeadm HA全记录

2017-09-29  本文已影响0人  getsu
  1. 安装前准备
  2. etcd集群
#!/bin/bash
# Bootstrap etcd0, the first member of a 3-node etcd cluster, as a Docker
# container on 172.25.16.120. Peers: etcd1 (172.25.16.121), etcd2 (172.25.16.122).
#
# WARNING: wipes any previous etcd data under /var/lib/etcd-cluster —
# --initial-cluster-state=new requires an empty data directory.
set -euo pipefail

readonly member_name=etcd0
readonly host_ip=172.25.16.120
readonly data_dir=/var/lib/etcd-cluster
readonly cluster_token=9477af68bbee1b9ae037d6fd9e7efefd
readonly initial_cluster=etcd0=http://172.25.16.120:2380,etcd1=http://172.25.16.121:2380,etcd2=http://172.25.16.122:2380

# Remove a leftover container from a previous run; ignore the error when no
# such container exists so the script works on a fresh host too.
docker rm -f etcd 2>/dev/null || true

rm -rf "${data_dir}"
mkdir -p "${data_dir}"

docker run -d \
  --restart always \
  -v /etc/ssl/certs:/etc/ssl/certs \
  -v "${data_dir}":/var/lib/etcd \
  -p 4001:4001 \
  -p 2380:2380 \
  -p 2379:2379 \
  --name etcd \
  gcr.io/google_containers/etcd-amd64:3.0.17 \
  etcd --name="${member_name}" \
  --advertise-client-urls="http://${host_ip}:2379,http://${host_ip}:4001" \
  --listen-client-urls=http://0.0.0.0:2379,http://0.0.0.0:4001 \
  --initial-advertise-peer-urls="http://${host_ip}:2380" \
  --listen-peer-urls=http://0.0.0.0:2380 \
  --initial-cluster-token="${cluster_token}" \
  --initial-cluster="${initial_cluster}" \
  --initial-cluster-state=new \
  --auto-tls \
  --peer-auto-tls \
  --data-dir=/var/lib/etcd

#!/bin/bash
# Bootstrap etcd1, the second member of a 3-node etcd cluster, as a Docker
# container on 172.25.16.121. Peers: etcd0 (172.25.16.120), etcd2 (172.25.16.122).
#
# BUG FIX: the original advertised http://172.25.16.120:4001 (etcd0's IP)
# in --advertise-client-urls; this member must advertise its own IP,
# 172.25.16.121, on port 4001 as well.
#
# WARNING: wipes any previous etcd data under /var/lib/etcd-cluster —
# --initial-cluster-state=new requires an empty data directory.
set -euo pipefail

readonly member_name=etcd1
readonly host_ip=172.25.16.121
readonly data_dir=/var/lib/etcd-cluster
readonly cluster_token=9477af68bbee1b9ae037d6fd9e7efefd
readonly initial_cluster=etcd0=http://172.25.16.120:2380,etcd1=http://172.25.16.121:2380,etcd2=http://172.25.16.122:2380

# Remove a leftover container from a previous run; ignore the error when no
# such container exists so the script works on a fresh host too.
docker rm -f etcd 2>/dev/null || true

rm -rf "${data_dir}"
mkdir -p "${data_dir}"

docker run -d \
  --restart always \
  -v /etc/ssl/certs:/etc/ssl/certs \
  -v "${data_dir}":/var/lib/etcd \
  -p 4001:4001 \
  -p 2380:2380 \
  -p 2379:2379 \
  --name etcd \
  gcr.io/google_containers/etcd-amd64:3.0.17 \
  etcd --name="${member_name}" \
  --advertise-client-urls="http://${host_ip}:2379,http://${host_ip}:4001" \
  --listen-client-urls=http://0.0.0.0:2379,http://0.0.0.0:4001 \
  --initial-advertise-peer-urls="http://${host_ip}:2380" \
  --listen-peer-urls=http://0.0.0.0:2380 \
  --initial-cluster-token="${cluster_token}" \
  --initial-cluster="${initial_cluster}" \
  --initial-cluster-state=new \
  --auto-tls \
  --peer-auto-tls \
  --data-dir=/var/lib/etcd

#!/bin/bash
# Bootstrap etcd2, the third member of a 3-node etcd cluster, as a Docker
# container on 172.25.16.122. Peers: etcd0 (172.25.16.120), etcd1 (172.25.16.121).
#
# WARNING: wipes any previous etcd data under /var/lib/etcd-cluster —
# --initial-cluster-state=new requires an empty data directory.
set -euo pipefail

readonly member_name=etcd2
readonly host_ip=172.25.16.122
readonly data_dir=/var/lib/etcd-cluster
readonly cluster_token=9477af68bbee1b9ae037d6fd9e7efefd
readonly initial_cluster=etcd0=http://172.25.16.120:2380,etcd1=http://172.25.16.121:2380,etcd2=http://172.25.16.122:2380

# Remove a leftover container from a previous run; ignore the error when no
# such container exists so the script works on a fresh host too.
docker rm -f etcd 2>/dev/null || true

rm -rf "${data_dir}"
mkdir -p "${data_dir}"

docker run -d \
  --restart always \
  -v /etc/ssl/certs:/etc/ssl/certs \
  -v "${data_dir}":/var/lib/etcd \
  -p 4001:4001 \
  -p 2380:2380 \
  -p 2379:2379 \
  --name etcd \
  gcr.io/google_containers/etcd-amd64:3.0.17 \
  etcd --name="${member_name}" \
  --advertise-client-urls="http://${host_ip}:2379,http://${host_ip}:4001" \
  --listen-client-urls=http://0.0.0.0:2379,http://0.0.0.0:4001 \
  --initial-advertise-peer-urls="http://${host_ip}:2380" \
  --listen-peer-urls=http://0.0.0.0:2380 \
  --initial-cluster-token="${cluster_token}" \
  --initial-cluster="${initial_cluster}" \
  --initial-cluster-state=new \
  --auto-tls \
  --peer-auto-tls \
  --data-dir=/var/lib/etcd

$ docker exec -ti etcd ash

$ etcdctl member list
19dcd68c1a5b8d7d: name=etcd2 peerURLs=http://172.25.16.122:2380 clientURLs=http://172.25.16.122:2379,http://172.25.16.122:4001 isLeader=true
688e88a7e1b4e844: name=etcd0 peerURLs=http://172.25.16.120:2380 clientURLs=http://172.25.16.120:2379,http://172.25.16.120:4001 isLeader=false
692a555d87ac214c: name=etcd1 peerURLs=http://172.25.16.121:2380 clientURLs=http://172.25.16.121:2379,http://172.25.16.121:4001 isLeader=false

$ etcdctl cluster-health
member 19dcd68c1a5b8d7d is healthy: got healthy result from http://172.25.16.122:2379
member 688e88a7e1b4e844 is healthy: got healthy result from http://172.25.16.120:2379
member 692a555d87ac214c is healthy: got healthy result from http://172.25.16.121:2379
cluster is healthy
  3. 在master1上通过kubeadm安装
 apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
kubernetesVersion: v1.7.5
networking:
  podSubnet: 10.244.0.0/16
apiServerCertSANs:
- centos-master-1
- centos-master-2
- centos-master-3
- 172.25.16.120
- 172.25.16.121
- 172.25.16.122
- 172.25.16.228
etcd:
  endpoints:
  - http://172.25.16.120:2379
  - http://172.25.16.121:2379
  - http://172.25.16.122:2379
  $ vi ~/.bashrc
export KUBECONFIG=/etc/kubernetes/admin.conf

$ source ~/.bashrc
  4. 安装flannel组件
---
# RBAC: the flannel service account needs to read pods, list/watch nodes,
# and patch node status to record the per-node subnet.
# NOTE: the original scrape lost indentation — in particular the
# ClusterRoleBinding "subjects" entry had its keys misaligned (parse error).
# The whole manifest is re-indented with a conventional 2-space style;
# content is unchanged.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
# CNI and flannel network configuration, mounted into the DaemonSet pods.
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "type": "flannel",
      "delegate": {
        "isDefaultGateway": true
      }
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "host-gw"
      }
    }
---
# One flanneld pod per amd64 node (masters included, via the toleration).
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.8.0-amd64
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr"]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      # Side-car: installs the CNI config on the host, then idles.
      - name: install-cni
        image: quay.io/coreos/flannel:v0.8.0-amd64
        command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
  5. Master HA配置
scp -r /etc/kubernetes/ master2:/etc/
scp -r /etc/kubernetes/ master3:/etc/
  6. 修改Master配置

    • 在master2、master3上修改kube-apiserver.yaml的配置,${HOST_IP}改为本机IP
      $ vi /etc/kubernetes/manifests/kube-apiserver.yaml
     - --advertise-address=${HOST_IP}
    
$ vi /etc/kubernetes/kubelet.conf

server: https://${HOST_IP}:6443
  $ vi /etc/kubernetes/admin.conf

  server: https://${HOST_IP}:6443
  $ vi /etc/kubernetes/controller-manager.conf

  server: https://${HOST_IP}:6443
$ vi /etc/kubernetes/scheduler.conf

  server: https://${HOST_IP}:6443
  $ systemctl daemon-reload && systemctl restart docker kubelet
  7. 在master1,master2,master3上安装keepalived
yum install -y keepalived
systemctl enable keepalived && systemctl restart keepalived
$ vi /etc/keepalived/check_apiserver.sh
#!/bin/bash
# keepalived health-check script: verify a local kube-apiserver process
# exists. Polls up to 10 times, 5s apart; if the apiserver is still missing
# after all attempts, stop keepalived so the VIP fails over to another master.
#
# BUG FIX: the original counted `ps -ef | grep kube-apiserver` output lines
# and compared against "1" — that count includes the grep process itself and
# silently breaks if the pipeline ever produces a different baseline.
# pgrep matches process command lines directly and excludes itself.
err=0
for _ in $(seq 1 10); do
    if pgrep -f kube-apiserver >/dev/null 2>&1; then
        # apiserver found: healthy, reset the failure counter and stop polling.
        err=0
        break
    fi
    err=$((err + 1))
    sleep 5
done

if [ "$err" != "0" ]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi


chmod a+x /etc/keepalived/check_apiserver.sh

$ vi /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! Floats the apiserver virtual IP across the three masters via VRRP.
! Template placeholders must be filled in per host before use:
!   ${STATE}          - MASTER on one node, BACKUP on the others
!   ${INTERFACE_NAME} - NIC carrying the VIP (e.g. eth0)
!   ${HOST_IP}        - this host's own IP (VRRP multicast source)
!   ${PRIORITY}       - highest value wins the VIP (e.g. 102/101/100)
!   ${VIRTUAL_IP}     - the shared VIP, 172.25.16.228 in this article
global_defs {
    router_id LVS_DEVEL
}
! Health check: /etc/keepalived/check_apiserver.sh runs every 2s;
! 3 consecutive failures mark the node down (weight -5), 2 successes recover.
vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 2
    weight -5
    fall 3  
    rise 2
}
vrrp_instance VI_1 {
    state ${STATE}
    interface ${INTERFACE_NAME}
    mcast_src_ip ${HOST_IP}
    ! virtual_router_id must match on all members of this VRRP group.
    virtual_router_id 51
    priority ${PRIORITY}
    advert_int 2
    authentication {
        auth_type PASS
        auth_pass 4be37dc3b4c90194d1600c483e10ad1d
    }
    virtual_ipaddress {
        ${VIRTUAL_IP}
    }
    track_script {
       chk_apiserver
    }
}
$ systemctl restart keepalived
$ ping 172.25.16.228
  8. kube-proxy配置
$ kubectl edit -n kube-system configmap/kube-proxy
        server: https://172.25.16.228:6443
systemctl restart docker kubelet keepalived
    kubectl edit configmaps cluster-info -n kube-public

server: https://${HOST_IP}:6443
  9. 加入node
  10. 禁止master2,master3上发布应用
kubectl taint nodes master-2 node-role.kubernetes.io/master=true:NoSchedule
kubectl taint nodes master-3 node-role.kubernetes.io/master=true:NoSchedule
上一篇 下一篇

猜你喜欢

热点阅读