Installing kubeadm v1.12.3

2018-11-06  Fonzie

Set up a Kubernetes test environment using kubeadm.

Environment: CentOS Linux release 7.4.1708 (Core)

Installing Docker on the master node

Adjust the kernel parameters by appending the following to /etc/sysctl.conf (the net.bridge.* entries require the br_netfilter module to be loaded), then apply with sysctl -p:

net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
vm.swappiness = 0
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.tcp_syncookies = 1
kernel.sysrq = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.ip_local_port_range = 1024 65023
net.ipv4.tcp_max_syn_backlog = 10240
net.ipv4.tcp_max_tw_buckets = 400000
net.ipv4.tcp_max_orphans = 60000
net.ipv4.tcp_synack_retries = 3
net.core.somaxconn = 10000
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
sysctl -p
hostnamectl set-hostname master
# append to /etc/hosts
10.2.29.148 master
yum install -y iptables.x86_64 iptables-services.x86_64 iptables-devel.x86_64
iptables -F
service iptables save
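
kubeadm's preflight checks also complain when swap is active; vm.swappiness = 0 alone does not turn swap off. A minimal sketch for disabling swap completely, assuming there is a swap entry in /etc/fstab:

swapoff -a                                   # turn swap off immediately
sed -i.bak '/ swap / s/^/#/' /etc/fstab      # keep it off after a reboot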

yum makecache
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum makecache fast
# yum list docker-ce --showduplicates
Loaded plugins: fastestmirror
Loading mirror speeds from cached hostfile
Available Packages
docker-ce.x86_64    17.03.0.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    17.03.1.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    17.03.2.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    17.03.3.ce-1.el7           docker-ce-stable
docker-ce.x86_64    17.06.0.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    17.06.1.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    17.06.2.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    17.09.0.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    17.09.1.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    17.12.0.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    17.12.1.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    18.03.0.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    18.03.1.ce-1.el7.centos    docker-ce-stable
docker-ce.x86_64    18.06.0.ce-3.el7           docker-ce-stable
docker-ce.x86_64    18.06.1.ce-3.el7           docker-ce-stable
yum install docker-ce-17.06.2.ce-1.el7.centos
systemctl start docker.service
systemctl enable docker.service

# vim /etc/docker/daemon.json
{
  "registry-mirrors": ["http://xxxxxxx.m.daocloud.io"],
  "log-driver": "journald",
  "log-opts": {
    "tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"
  }
}
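
daemon.json is only read when the Docker daemon starts, so restart Docker after editing it. A quick sanity check (the mirror should show up under "Registry Mirrors" in docker info):

systemctl restart docker
docker info | grep -A1 -i 'registry mirrors'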

Installing and configuring kubeadm

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum makecache fast
yum install -y kubernetes-cni kubelet kubeadm kubectl --skip-broken
systemctl start kubelet.service
systemctl enable kubelet.service
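
At this point kubelet will keep restarting until kubeadm init has written its configuration; that is expected. To double-check which versions the Aliyun repo actually installed, something like:

kubeadm version -o short
kubelet --version
kubectl version --client --short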

By default, the kubeadm, kubectl and kubelet installed here are the latest versions in the repo. How do you find out what the latest Kubernetes release is?

It can be checked at the following two addresses:

Note: you will need a tool that can get past 6ZW/5Z+O6Ziy54Gr5aKZCg== (base64-decode it), and remember to switch it to global mode.
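
If you would rather pin the versions than take whatever is newest in the repo, yum accepts an explicit package version. A sketch, assuming the 1.12.3 packages are available in the Aliyun mirror:

yum list kubeadm --showduplicates | tail -5
yum install -y kubelet-1.12.3 kubeadm-1.12.3 kubectl-1.12.3 kubernetes-cni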

Create a basic CNI bridge configuration so the kubelet can set up pod networking:

mkdir /etc/cni/net.d/ -p
cat >/etc/cni/net.d/10-mynet.conf <<-EOF
{
    "cniVersion": "0.3.0",
    "name": "mynet",
    "type": "bridge",
    "bridge": "cni0",
    "isGateway": true,
    "ipMasq": true,
    "ipam": {
        "type": "host-local",
        "subnet": "10.244.0.0/16",
        "routes": [
            {"dst": "0.0.0.0/0"}
        ]
    }
}
EOF
cat >/etc/cni/net.d/99-loopback.conf <<-EOF
{
    "cniVersion": "0.3.0",
    "type": "loopback"
}
EOF
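
The bridge, host-local and loopback plugins referenced above are installed by the kubernetes-cni package; a quick check that both the binaries and the configs are in place:

ls /opt/cni/bin/      # should list bridge, host-local, loopback, ...
ls /etc/cni/net.d/    # 10-mynet.conf  99-loopback.conf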

Save the following kubeadm configuration as kubeadm.yml:

kind: MasterConfiguration
apiVersion: kubeadm.k8s.io/v1alpha2
#kubernetesVersion: "stable"
kubernetesVersion: "v1.12.2"
apiServerCertSANs: []
#imageRepository: crproxy.trafficmanager.net:6000/google_containers
#imageRepository: mirrorgooglecontainers
imageRepository: registry.aliyuncs.com/google_containers
#imageRepository: ""
controllerManagerExtraArgs:
  horizontal-pod-autoscaler-use-rest-clients: "true"
  horizontal-pod-autoscaler-sync-period: "10s"
  node-monitor-grace-period: "10s"
  feature-gates: "AllAlpha=true"
  enable-dynamic-provisioning: "true"
apiServerExtraArgs:
  runtime-config: "api/all=true"
  feature-gates: "AllAlpha=true"
  #feature-gates: "CoreDNS=true"
networking:
  podSubnet: "10.244.0.0/16"
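
Before pulling anything you can ask kubeadm which images this config resolves to (same --config syntax as the pull below):

kubeadm --config kubeadm.yml config images list
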
Now for a few of the pitfalls you hit inside China.

Pull the images:

# kubeadm --config kubeadm.yml config images pull
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.12.2
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.12.2
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.12.2
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.12.2
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.1
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.2.24
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:1.2.2
Re-tag the images with the default k8s.gcr.io names as well:

docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.12.2 k8s.gcr.io/kube-proxy:v1.12.2
docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.12.2 k8s.gcr.io/kube-apiserver:v1.12.2
docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.12.2 k8s.gcr.io/kube-controller-manager:v1.12.2
docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.12.2 k8s.gcr.io/kube-scheduler:v1.12.2
docker tag registry.aliyuncs.com/google_containers/etcd:3.2.24 k8s.gcr.io/etcd:3.2.24
docker tag registry.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
docker tag registry.aliyuncs.com/google_containers/coredns:1.2.2 k8s.gcr.io/coredns:1.2.2
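
The same re-tagging can be written as a single loop; a sketch equivalent to the commands above:

for img in kube-apiserver:v1.12.2 kube-controller-manager:v1.12.2 kube-scheduler:v1.12.2 \
           kube-proxy:v1.12.2 etcd:3.2.24 pause:3.1 coredns:1.2.2; do
    docker tag registry.aliyuncs.com/google_containers/$img k8s.gcr.io/$img
done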

Initialize the master:

kubeadm init --config kubeadm.yml  --ignore-preflight-errors all

On success you will see output like this:

.....[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join 10.2.29.148:6443 --token 5m131g.yz6x7217jjfz5w98 --discovery-token-ca-cert-hash sha256:2258ceec06ea8e5b855273cf4d7e45f4a83c42fbd7c3cce2331984418bf504c6


mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
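
A quick check that kubectl is now talking to the new API server:

kubectl cluster-info
kubectl get cs        # componentstatuses: scheduler, controller-manager and etcd should be Healthy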

Check the nodes:

$ kubectl get nodes
NAME      STATUS     ROLES    AGE     VERSION
master1   NotReady   master   4m42s   v1.12.2

If you reboot or run kubeadm reset, the CNI configuration gets cleaned up and has to be recreated:

mkdir /etc/cni/net.d/ -p
cat >/etc/cni/net.d/10-mynet.conf <<-EOF
{
    "cniVersion": "0.3.0",
    "name": "mynet",
    "type": "bridge",
    "bridge": "cni0",
    "isGateway": true,
    "ipMasq": true,
    "ipam": {
        "type": "host-local",
        "subnet": "10.244.0.0/16",
        "routes": [
            {"dst": "0.0.0.0/0"}
        ]
    }
}
EOF
cat >/etc/cni/net.d/99-loopback.conf <<-EOF
{
    "cniVersion": "0.3.0",
    "type": "loopback"
}
EOF
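
After the files are recreated the kubelet should pick them up on its own; you can watch the node switch back to Ready with:

kubectl get nodes -w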

Check the node again:

$ kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
master1   Ready    master   5m43s   v1.12.2

Installing on the worker node

yum makecache
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum makecache fast
yum install docker-ce-17.06.2.ce-1.el7.centos
systemctl start docker.service
systemctl enable docker.service

Apply the same kernel parameters as on the master (append to /etc/sysctl.conf):

net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
vm.swappiness = 0
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.tcp_syncookies = 1
kernel.sysrq = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.ip_local_port_range = 1024 65023
net.ipv4.tcp_max_syn_backlog = 10240
net.ipv4.tcp_max_tw_buckets = 400000
net.ipv4.tcp_max_orphans = 60000
net.ipv4.tcp_synack_retries = 3
net.core.somaxconn = 10000
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
sysctl -p
# cat /etc/docker/daemon.json
{
  "registry-mirrors": ["http://xxxxxxx.m.daocloud.io"],
  "log-driver": "journald",
  "log-opts": {
    "tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"
  }
}
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum makecache fast
yum install -y kubernetes-cni kubelet kubeadm kubectl --skip-broken
systemctl start kubelet.service
systemctl enable kubelet.service

Set the hostname and update /etc/hosts

hostnamectl set-hostname node1
vim /etc/hosts
10.2.29.149 node1

Join the node to the cluster:

kubeadm join 10.2.29.148:6443 --token 5m131g.yz6x7217jjfz5w98 --discovery-token-ca-cert-hash sha256:2258ceec06ea8e5b855273cf4d7e45f4a83c42fbd7c3cce2331984418bf504c6
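
The bootstrap token printed by kubeadm init is only valid for 24 hours; if the join is rejected later on, a fresh join command can be generated:

# run on the master
kubeadm token create --print-join-command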

Configure the CNI network:

mkdir /etc/cni/net.d/ -p
cat >/etc/cni/net.d/10-mynet.conf <<-EOF
{
    "cniVersion": "0.3.0",
    "name": "mynet",
    "type": "bridge",
    "bridge": "cni0",
    "isGateway": true,
    "ipMasq": true,
    "ipam": {
        "type": "host-local",
        "subnet": "10.244.0.0/16",
        "routes": [
            {"dst": "0.0.0.0/0"}
        ]
    }
}
EOF
cat >/etc/cni/net.d/99-loopback.conf <<-EOF
{
    "cniVersion": "0.3.0",
    "type": "loopback"
}
EOF

Check the nodes from the master:

$ kubectl get nodes
NAME      STATUS   ROLES    AGE   VERSION
master1   Ready    master   22m   v1.12.2
node1     Ready    <none>   67s   v1.12.2
Deploy the flannel network add-on, then check the system pods:

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml

$ kubectl get pods -n kube-system
NAME                              READY   STATUS              RESTARTS   AGE
coredns-5f5b8b4fdb-s8672          0/1     CrashLoopBackOff    8          25m
coredns-5f5b8b4fdb-zpn49          0/1     CrashLoopBackOff    8          25m
etcd-master1                      1/1     Running             0          24m
kube-apiserver-master1            1/1     Running             0          24m
kube-controller-manager-master1   1/1     Running             0          24m
kube-flannel-ds-jkh96             0/1     Init:0/1            0          2m19s
kube-flannel-ds-nwq7t             1/1     Running             0          2m19s
kube-proxy-4q6sv                  1/1     Running             0          25m
kube-proxy-7wjvq                  0/1     ContainerCreating   0          4m32s
kube-scheduler-master1            1/1     Running             0          24m
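
Adding -o wide shows which node the stuck pods were scheduled on, which points straight at node1 and the missing pause image handled in the next step:

kubectl get pods -n kube-system -o wide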

Pull the pause image on the node. The node's kube-proxy and flannel pods are stuck in ContainerCreating/Init because node1 cannot pull k8s.gcr.io/pause:3.1 directly, so fetch it from the Aliyun mirror and re-tag it:

# docker pull registry.aliyuncs.com/google_containers/pause:3.1
3.1: Pulling from google_containers/pause
cf9202429979: Pull complete
Digest: sha256:759c3f0f6493093a9043cc813092290af69029699ade0e3dbe024e968fcb7cca
Status: Downloaded newer image for registry.aliyuncs.com/google_containers/pause:3.1
docker tag registry.aliyuncs.com/google_containers/pause:3.1  k8s.gcr.io/pause:3.1

Check again from the master:

$ kubectl get pods -n kube-system
NAME                              READY   STATUS    RESTARTS   AGE
coredns-5f5b8b4fdb-s8672          1/1     Running   9          50m
coredns-5f5b8b4fdb-zpn49          1/1     Running   9          50m
etcd-master1                      1/1     Running   0          49m
kube-apiserver-master1            1/1     Running   0          49m
kube-controller-manager-master1   1/1     Running   0          49m
kube-flannel-ds-jkh96             1/1     Running   0          26m
kube-flannel-ds-nwq7t             1/1     Running   0          26m
kube-proxy-4q6sv                  1/1     Running   0          50m
kube-proxy-7wjvq                  1/1     Running   0          29m
kube-scheduler-master1            1/1     Running   0          49m
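
As a final smoke test you could schedule a throwaway deployment and confirm it is placed on node1; a minimal sketch:

kubectl create deployment nginx --image=nginx
kubectl get pods -o wide      # the nginx pod should land on node1 and reach Running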