
Kubernetes Core Resource Management


1. Using Pods

mkdir /opt/yml  -p 
cd /opt/yml
[root@k8s-master yaml]# cat nginx.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: default
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx:latest
    imagePullPolicy: IfNotPresent
    ports:
        - containerPort: 80
  restartPolicy: Always
kubectl create -f nginx.yaml                      # create the pod
kubectl get nodes --show-labels                   # show node labels
kubectl get pod                                   # list pods
kubectl get pod -o wide                           # detailed listing
kubectl get namespace                             # list namespaces
kubectl get pods -o wide -l app=nginx             # show only pods matching a label
kubectl describe pods                             # inspect pod details for troubleshooting
kubectl delete pod nginx                          # delete the pod
kubectl logs nginx                                # view logs
kubectl exec -it nginx -- /bin/sh                 # enter the container
kubectl exec -it pod-demo -c myapp -- /bin/sh     # enter the myapp container in pod-demo
curl 10.244.1.7                                   # access via the pod IP
vim nginx.yaml
 image: ikubernetes/myapp:v2
kubectl replace --force -f nginx.yaml             # upgrade: deletes the old pod, then creates a new one

1.1 Pod shared network and storage

Shared network:
A Pod can contain multiple containers that share network and storage resources. Every Pod has a pause container that holds the state for all of its containers; by managing the pause container, Kubernetes manages all containers in the Pod.
All containers in a Pod join the pause container's namespaces, so they share one network namespace.
Shared storage:
Shared data volumes are introduced to persist files across the Pod's containers.
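As an illustration, a minimal sketch of a two-container Pod sharing an emptyDir volume (the pod, container, and volume names here are hypothetical). The busybox container writes a file that nginx then serves; because both containers share the Pod's network namespace, they could also reach each other via localhost:
apiVersion: v1
kind: Pod
metadata:
  name: shared-demo        # hypothetical name
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: shared-data
      mountPath: /usr/share/nginx/html
  - name: writer
    image: busybox
    command: ["/bin/sh", "-c", "echo hello > /data/index.html; sleep 3600"]
    volumeMounts:
    - name: shared-data
      mountPath: /data
  volumes:
  - name: shared-data
    emptyDir: {}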

1.2 Pod image pull policy

imagePullPolicy: IfNotPresent | Always | Never
Always: always pull the image
IfNotPresent: use the local image if it exists, otherwise pull
Never: only use the local image, never pull, even if it is missing locally
If imagePullPolicy is omitted, it defaults to Always when the tag is :latest (or no tag is given), otherwise to IfNotPresent.

1.3 Pod resource limits

https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/
resources:
  limits:              # maximum allowed
    cpu: "250m"
    memory: 2048Mi
  requests:            # minimum guaranteed (used by the scheduler)
    cpu: "128m"
    memory: 1024Mi
1 core = 1000m, so 250m = 0.25 core.
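Put together, a sketch of a complete Pod carrying both requests and limits (the pod name is arbitrary):
apiVersion: v1
kind: Pod
metadata:
  name: resource-demo    # hypothetical name
spec:
  containers:
  - name: nginx
    image: nginx
    resources:
      requests:
        cpu: "128m"      # scheduler guarantees at least 0.128 core
        memory: 1024Mi
      limits:
        cpu: "250m"      # container is throttled above 0.25 core
        memory: 2048Mi   # container is OOM-killed above this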

1.4 Pod restart policy

restartPolicy: [Always|Never|OnFailure]   # default is Always
Always: always restart the container when it exits
Never: never restart; suited to one-off jobs and scheduled tasks
OnFailure: restart only when the container exits with a non-zero status code

1.5 Pod health checks

livenessProbe: liveness check; if it fails, the container is killed and handled according to the pod's restart policy
readinessProbe: readiness check; if it fails, Kubernetes removes the pod from the Service endpoints
Probe methods:
 ExecAction: run a command inside the container; an exit code of 0 means success
 TCPSocketAction: TCP check against the container IP and a given port; an open port means success
 HTTPGetAction: HTTP request against the container IP, port, and path; a response code between 200 and 400 means success
Each probe has three possible results:
Success: the health check passed
Failure: the health check did not pass
Unknown: the probe itself failed to run

1.5.1 livenessProbe with exec

[root@k8s-master yaml]# vim livenessProbe.yaml 
apiVersion: v1
kind: Pod
metadata:
  labels:
    app: liveness-exec
  name: liveness-exec
spec:
  containers:
  - name: liveness-exec
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy; sleep 20; rm -rf /tmp/healthy; sleep 600
    livenessProbe:
      exec:
        command:
        - test
        - -e
        - /tmp/healthy
The file is removed after 20 seconds, so the probe starts failing and the container restarts over and over.

1.5.2 HTTP Health Check

apiVersion: v1
kind: Pod
metadata:
  labels:
    test: httpget
  name: liveness-httpget
spec:
  containers:
  - name: liveness-httpget
    image: nginx:1.12-alpine
    ports:
    - name: http
      containerPort: 80
    lifecycle:
      postStart:                 # runs right after the container starts
        exec:
          command:
          - /bin/sh
          - -c
          - 'echo healthy > /usr/share/nginx/html/healthz'
    livenessProbe:               # liveness probe
      httpGet:
        path: /healthz
        port: http
        scheme: HTTP
      periodSeconds: 2           # probe every 2 seconds
      failureThreshold: 2        # considered failed after 2 consecutive failures
      initialDelaySeconds: 3     # wait 3 seconds after startup before the first probe

1.5.3 TCP Socket

apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
    app: node
  name: liveness-tcp
spec:
  containers:
  - name: goproxy
    image: k8s.gcr.io/goproxy:0.1
    ports:
    - containerPort: 8080
    readinessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 5
      periodSeconds: 10
    livenessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 15
      periodSeconds: 20

1.6 Pod scheduling

1. Pod resource requests: the scheduler only places a pod on a node that can satisfy them
2. Node selector (nodeSelector): if no node carries the required label, the pod cannot be scheduled
kubectl get nodes --show-labels              # show node labels
kubectl label node k8s-node1 env_role=dev    # add a label manually
3. Node affinity:
         hard affinity (required): must be satisfied, otherwise the pod stays Pending and is never scheduled
         soft affinity (preferred): a preference; the pod is scheduled even if no node matches
         operators include In and NotIn

1.6.1 Node selection by label

apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: default
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx:latest
    imagePullPolicy: IfNotPresent        
    ports:
        - containerPort: 80
  restartPolicy: Always
  nodeSelector:
    kubernetes.io/hostname: k8s-node1

1.6.2 Hard affinity

apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: default
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx:latest
    imagePullPolicy: IfNotPresent
    ports:
        - containerPort: 80
  restartPolicy: Always
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: kubernetes.io/hostname
            operator: In
            values:
            - k8s-node2

1.6.3 Soft affinity

apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: default
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx:latest
    imagePullPolicy: IfNotPresent
    ports:
        - containerPort: 80
  restartPolicy: Always
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: kubernetes.io/hostname
            operator: In
            values:
            - k8s-node1

1.7 Pod taints

Taints are a node-level property.
[root@k8s-master yaml]# kubectl describe node k8s-master |grep Taint   # show taints
Taints:             node-role.kubernetes.io/master:NoSchedule
Taint effects:
NoSchedule: pods are never scheduled onto this node
PreferNoSchedule: avoid scheduling here when possible
NoExecute: no new pods are scheduled, and existing pods on the node are evicted
kubectl taint node k8s-node1 env_role=yes:NoSchedule   # the key and value (env_role=yes) are arbitrary
kubectl create deployment web --image=nginx
kubectl scale deployment web --replicas=5              # all 5 pods land on node2
kubectl taint node k8s-node1 env_role:NoSchedule-      # remove the taint
Tolerations:
$ kubectl taint nodes node01 key=value:NoSchedule      # general form
kubectl taint node k8s-node1 env_role=yes:NoSchedule   # re-apply the taint
    tolerations:
    - key: "env_role"
      operator: "Equal"
      value: "yes"
      effect: "NoSchedule"
To allow scheduling onto the master:
tolerations:
- key: "node-role.kubernetes.io/master"
  operator: "Exists"
  effect: "NoSchedule"

2. Controller

Pods are operated through controllers, which provide features such as scaling and rolling upgrades.
Pods and controllers are associated with each other through labels.

2.1 Deployment (short name: deploy)

Deploys stateless applications
Manages Pods and ReplicaSets
Provides deployment, rolling upgrades, and related features
Typical uses: web services, microservices
kubectl create deployment web --image=nginx --dry-run=client -o yaml >web.yaml   # generate the manifest
kubectl apply -f web.yaml                                                        # apply it
kubectl expose deployment web --port=80 --type=NodePort --target-port=80 --name web -o yaml >service.yaml   # expose it externally
kubectl apply -f service.yaml
kubectl edit deployment web   # edit the live configuration
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:                # matched by the selector below
    app: web
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:         # must match the pod template labels
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: nginx
        name: nginx
        resources: {}
status: {}
Deployment update and rollback:
kubectl create deployment wangye --image=ikubernetes/myapp:v1    # create
kubectl set image deployment wangye myapp=ikubernetes/myapp:v2   # update the image
kubectl rollout status deployment wangye                         # watch the rollout
kubectl rollout history deployment wangye                        # show rollout history
kubectl rollout undo deployment wangye                           # roll back to the previous revision
kubectl rollout undo deployment wangye --to-revision=2           # roll back to a specific revision
kubectl scale deployment wangye --replicas=10                    # scale out

2.2 Service (short name: svc)

1. Prevents pods from becoming unreachable when their IPs change
2. Service discovery and load balancing, associated with pods by label
kubectl expose --help
ClusterIP: reachable from inside the cluster only (the default)
NodePort: exposes the application for external access
LoadBalancer: external access through a public cloud load balancer
Access: web.default.svc.cluster.local resolves inside pods to the Service's cluster IP; see the check below.
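A quick way to verify the DNS name (a sketch assuming a Service named web exists in the default namespace; busybox:1.28 is used because nslookup is broken in newer busybox images):
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup web.default.svc.cluster.local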

2.2.1 NodePort

Create a NodePort Service so hosts outside the cluster can reach the application.
vim  myapp-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: myapp
  namespace: default
spec:
  selector:
    app: myapp
    release: canary
  clusterIP: 10.99.99.99  # a fixed address can easily collide; better to omit this and let it be auto-assigned
  type: NodePort
  ports:
  - port: 80          # port the Service exposes
    targetPort: 80    # port the Pod listens on
    protocol: TCP
    nodePort: 30080   # optional; choose one that does not conflict

2.2.2 ClusterIP

[root@k8s-master ~]# cat myapp-svc1.yaml 
apiVersion: v1
kind: Service
metadata:
  name: myapp1
  namespace: default
spec:
  selector:
    app: myapp1
  clusterIP: 10.99.99.98  # a fixed address can easily collide; better to omit this and let it be auto-assigned
  type: ClusterIP
  ports:
  - port: 80          # port the Service exposes
    targetPort: 80    # port the Pod listens on
    protocol: TCP

2.2.3 Headless Service

[root@k8s-master ~]# cat myapp-svc2.yaml 
apiVersion: v1
kind: Service
metadata:
  name: myapp2
  namespace: default
spec:
  selector:
    app: myapp2
  clusterIP: None  # the only difference from a normal ClusterIP Service: set to None
  type: ClusterIP
  ports:
  - port: 80          # port the Service exposes
    targetPort: 80    # port the Pod listens on
    protocol: TCP
[root@k8s-master ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
blog-mysql   ClusterIP   10.110.225.176   <none>        3306/TCP       73m
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP        3d19h
myapp        NodePort    10.99.99.99      <none>        80:30080/TCP   9m24s
myapp1       ClusterIP   10.99.99.98      <none>        80/TCP         3m54s
myapp2       ClusterIP   None             <none>        80/TCP         13s

kubectl exec -it nginx-f857964c8-fw2w4 -- /bin/bash   # enter any pod to check DNS settings
root@nginx-f857964c8-fw2w4:/# cat /etc/resolv.conf 
nameserver 10.96.0.10   # the cluster DNS
search default.svc.cluster.local svc.cluster.local cluster.local
options ndots:5
[root@k8s-master ~]# kubectl get svc -n kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   3d19h
dig @10.96.0.10 php.default.svc.cluster.local   # resolve a service name directly against the cluster DNS

2.2.4 ExternalName: map an external service into the cluster

[root@k8s-master ~]# cat myapp-svc3.yaml 
apiVersion: v1
kind: Service
metadata:
  name: myapp3
  namespace: default
spec:
  type: ExternalName
  externalName: www.baidu.com
Test the resolution:
 dig @10.96.0.10 myapp3.default.svc.cluster.local
kubectl expose deployment web --port=80 --type=NodePort --target-port=80 --name web --dry-run=client -o yaml >service.yaml   # generate a Service manifest without creating it
[root@k8s-master yaml]# cat service.yaml 
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: web
  type: NodePort
status:
  loadBalancer: {}

2.3 StatefulSet (stateful applications)

apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx

---

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nginx-statefulset
  namespace: default
spec:
  serviceName: nginx
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
kubectl get pods -o wide   # pod names are stable and unique
nginx-statefulset-0   1/1     Running   0          82s   10.244.2.5   k8s-node2   <none>           <none>
nginx-statefulset-1   1/1     Running   0          20s   10.244.1.4   k8s-node1   <none>           <none>
nginx-statefulset-2   1/1     Running   0          10s   10.244.2.6   k8s-node2   <none>           <none>
[root@k8s-master yaml]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        36h
nginx        ClusterIP   None            <none>        80/TCP         3m13s
web          NodePort    10.106.212.87   <none>        80:31624/TCP   35h
Access: nginx-statefulset-0.nginx.default.svc.cluster.local
Format: <pod-name>.<service-name>.<namespace, default here>.svc.cluster.local
kubectl exec -it web-5fddd679d-c2vmg -- /bin/sh
/ # ping nginx-statefulset-0.nginx.default.svc.cluster.local
PING nginx-statefulset-0.nginx.default.svc.cluster.local (10.244.2.5): 56 data bytes
64 bytes from 10.244.2.5: seq=0 ttl=62 time=0.950 ms
64 bytes from 10.244.2.5: seq=1 ttl=62 time=1.007 ms

2.4 DaemonSet: deploying daemons

Runs one pod on every node; newly joined nodes automatically get a pod as well. Typical uses: log collectors and monitoring agents.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: ds-test 
  labels:
    app: filebeat
spec:
  selector:
    matchLabels:
      app: filebeat
  template:
    metadata:
      labels:
        app: filebeat
    spec:
      containers:
      - name: logs
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: varlog
          mountPath: /tmp/log
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
kubectl apply -f daemonset.yaml 
[root@k8s-master yaml]# kubectl get daemonset
NAME      DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
ds-test   2         2         2       2            2           <none>          28s
kubectl exec -it ds-test-6rhwg -- /bin/bash 
root@ds-test-6rhwg:~# ls /tmp/log/   # the host's /var/log, seen from inside the pod
anaconda       chrony     dmesg           maillog        messages-20210124  secure        spooler-20210124
audit          containers     dmesg.old       maillog-20210123   pods           secure-20210123   tallylog

2.5 Job: one-off tasks

Compute pi:
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never
  backoffLimit: 4
kubectl get jobs   # shows Completed when the job finishes
[root@k8s-master yaml]# kubectl logs pi-pdlz9   # view the computed digits of pi
3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679821480865132823066470938446095505822317253594081284811174502841027019385211055596446229489549303819644288109756659334461284756482337867831652712019091456485669234603486104543266482133936072602491412737245870066063155881748815209209628292540917153643678925903600113305305488204665213841469519415116094330572703657595919530921861173819326117931051185480744623799627495673518857527248912279381830119491298336733624406566430860213949463952247371907021798609437027705392171762931767523846748184676694051320005681271452635608277857713427577896091736371787214684409012249534301465495853710507922796892589235420199561121290219608640344181598136297747713099605187072113499999983729780499510597317328160963185950244594553469083026425223082533446850352619311881710100031378387528865875332083814206171776691473035982534904287554687311595628638823537875937519577818577805321712268066130019278766111959092164201989380952572010654858632788659361533818279682303019520353018529689957736225994138912497217752834791315155748572424541506959508295331168617278558890750983817546374649393192550604009277016711390098488240128583616035637076601047101819429555961989467678374494482553797747268471040475346462080466842590694912933136770289891521047521620569660240580381501935112533824300355876402474964732639141992726042699227967823547816360093417216412199245863150302861829745557067498385054945885869269956909272107975093029553211653449872027559602364806654991198818347977535663698074265425278625518184175746728909777727938000816470600161452491921732172147723501414419735685481613611573525521334757418494684385233239073941433345477624168625189835694855620992192221842725502542568876717904946016534668049886272327917860857843838279679766814541009538837863609506800642251252051173929848960841284886269456042419652850222106611863067442786220391949450471237137869609563643719172874677646575739624138908658326459958133904780275901

2.6 CronJob: scheduled tasks

apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
[root@k8s-master yaml]# kubectl get cronjob
NAME    SCHEDULE      SUSPEND   ACTIVE   LAST SCHEDULE   AGE
hello   */1 * * * *   False     0        <none>          26s
[root@k8s-master yaml]# kubectl logs hello-1611540300-m64qs
Mon Jan 25 02:05:16 UTC 2021
Hello from the Kubernetes cluster
[root@k8s-master yaml]# kubectl get pods
NAME                     READY   STATUS      RESTARTS   AGE
hello-1611540300-m64qs   0/1     Completed   0          2m35s
hello-1611540360-ghb97   0/1     Completed   0          93s
hello-1611540420-6ndkb   0/1     Completed   0          33s

2.7 Secret: passing sensitive variables (base64-encoded)

[root@k8s-master yaml]# echo -n "admin" | base64   # base64-encode the value
YWRtaW4=
vim secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: mysecret
type: Opaque
data:
  username: YWRtaW4=
  password: MWYyZDFlMmU2N2Rm
 kubectl apply -f secret.yaml 
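To confirm what is stored (base64 is reversible encoding, not encryption):
echo -n "MWYyZDFlMmU2N2Rm" | base64 -d   # prints 1f2d1e2e67df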
Using a Secret to store registry credentials for pulling private images:
kubectl create secret docker-registry <secret-name> \
  --docker-server=<your registry server> \
  --docker-username=<your username> \
  --docker-password=<your password> \
  --docker-email=<your email address>
ilinux@k8s-master:~$ vim dockerhub.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: private-yickshun-ssr
spec:
  containers:
  - name: private-yickshun-ssr
    image: onlinemy/yickshun-ssr:latest
  imagePullSecrets:
  - name: <secret-name>

2.7.1 Mounting as environment variables

apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
  - name: nginx
    image: nginx
    env:
      - name: SECRET_USERNAME
        valueFrom:
          secretKeyRef:
            name: mysecret
            key: username
      - name: SECRET_PASSWORD
        valueFrom:
          secretKeyRef:
            name: mysecret
            key: password
kubectl apply -f env.yaml
kubectl exec -it mypod -- /bin/bash
root@mypod:/# echo $SECRET_USERNAME
admin
root@mypod:/# echo $SECRET_PASSWORD
1f2d1e2e67df

2.7.2 Mounting as files

apiVersion: v1
kind: Pod
metadata:
  name: myfile
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: foo
      mountPath: "/etc/foo"
      readOnly: true
  volumes:
  - name: foo
    secret:
      secretName: mysecret
kubectl apply -f file.yaml
kubectl exec -it myfile -- /bin/bash
root@myfile:~# ls /etc/foo/
password  username
root@myfile:~# cat /etc/foo/password 
1f2d1e2e67df
root@myfile:~# cat /etc/foo/username 
admin

2.8 ConfigMap (short name: cm): configuration files

vim redis.conf
redis.host=127.0.0.1
redis.port=6379
redis.password=123456
kubectl create configmap redis-config --from-file=redis.conf 
[root@k8s-master yaml]# kubectl get cm
NAME           DATA   AGE
redis-config   1      36s
[root@k8s-master yaml]# kubectl describe cm redis-config   # show the full contents

2.8.1 Mounting as files via a volume

apiVersion: v1
kind: Pod
metadata:
  name: myconfig
spec:
  containers:
    - name: busybox
      image: busybox
      command: [ "/bin/sh","-c","cat /etc/config/redis.conf" ]
      volumeMounts:
      - name: config-volume
        mountPath: /etc/config
  volumes:
    - name: config-volume
      configMap:
        name: redis-config
  restartPolicy: Never
[root@k8s-master yaml]# kubectl logs myconfig
redis.host=127.0.0.1
redis.port=6379
redis.password=123456

2.8.2 Mounting as environment variables

[root@k8s-master yaml]# vim cmenv.yaml   # define the variables
apiVersion: v1
kind: ConfigMap
metadata:
  name: myconfig
  namespace: default
data:
  special.level: info
  special.type: hello
[root@k8s-master yaml]# vim cmenv-config.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: mypodenv
spec:
  containers:
    - name: busybox
      image: busybox
      command: [ "/bin/sh", "-c", "echo $(LEVEL) $(TYPE)" ]
      env:
        - name: LEVEL
          valueFrom:
            configMapKeyRef:
              name: myconfig
              key: special.level
        - name: TYPE
          valueFrom:
            configMapKeyRef:
              name: myconfig
              key: special.type
  restartPolicy: Never
[root@k8s-master yaml]# kubectl logs mypodenv
info hello

3. Ingress: routing different domains to different Services (nginx reverse proxy)

https://github.com/kubernetes/ingress-nginx/blob/master/docs/deploy/index.md

3.1 Deploy a Service

kubectl create deployment web --image=nginx
kubectl expose deployment web --port=80 --target-port=80 --type=NodePort

3.2 Deploy the ingress-controller

mkdir ingress-controller
cd  ingress-controller
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/mandatory.yaml
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/provider/baremetal/service-nodeport.yaml
vim mandatory.yaml
Replace the image quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0
with registry.aliyuncs.com/google_containers/nginx-ingress-controller:0.30.0
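An equivalent one-liner (a sketch, assuming GNU sed):
sed -i 's#quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0#registry.aliyuncs.com/google_containers/nginx-ingress-controller:0.30.0#' mandatory.yaml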
kubectl apply -f mandatory.yaml
kubectl apply -f service-nodeport.yaml
kubectl get namespace
kubectl get pods -n ingress-nginx
[root@k8s-master ~]# kubectl get svc -n ingress-nginx 
NAME            TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx   NodePort   10.104.19.64   <none>        80:30270/TCP,443:31119/TCP   99s
vim ingress-web.yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: example-ingress
spec:
  rules:
  - host: example.ingredemo.com
    http:
      paths:
      - path: /
        backend:
          serviceName: web
          servicePort: 80
kubectl apply -f ingress-web.yaml
vim /etc/hosts
192.168.199.101 example.ingredemo.com   # point the test domain at a node IP
[root@localhost ~]# curl example.ingredemo.com:30270
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

4. Helm

https://github.com/helm/helm/releases

wget https://get.helm.sh/helm-v3.5.0-linux-amd64.tar.gz
tar xf helm-v3.5.0-linux-amd64.tar.gz 
cd linux-amd64/
mv helm /usr/bin/
Add repositories:
helm repo add stable http://mirror.azure.cn/kubernetes/charts
helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
helm repo update
helm repo list
helm repo remove aliyun              # remove a repository
helm search repo weave               # search for charts
helm install ui stable/weave-scope   # install a chart
helm create mychart                  # scaffold a custom chart
Creating mychart
 tree mychart/
mychart/
├── charts
├── Chart.yaml
├── templates
│ ├── deployment.yaml
│ ├── _helpers.tpl
│ ├── ingress.yaml
│ ├── NOTES.txt
│ └── service.yaml
└── values.yaml
 Chart.yaml: basic information about the chart, including its name, description, and version.
 values.yaml: values for the variables used in the template files under templates/.
 templates/: holds all the YAML template files.
 charts/: holds all sub-charts this chart depends on.
 NOTES.txt: help text for the chart, shown to the user after helm install (e.g. how to use the chart, default settings).
_helpers.tpl: template helpers that can be reused throughout the chart.
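To see how values.yaml feeds the templates, a minimal sketch (hypothetical keys, not the scaffolded chart's actual contents):
# values.yaml
image:
  repository: nginx
  tag: "1.21"
# templates/deployment.yaml (fragment)
      containers:
      - name: {{ .Chart.Name }}
        image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
Values can be overridden at install time, e.g. helm install myweb ./mychart --set image.tag=1.22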

5. Deploying the dashboard

wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
vim recommended.yaml
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort  # added
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30009 # added
kubectl apply -f recommended.yaml
kubectl get namespace   # the new kubernetes-dashboard namespace appears
[root@k8s-master ~]# kubectl get pods -n kubernetes-dashboard -o wide
NAME                                         READY   STATUS              RESTARTS   AGE   IP       NODE         NOMINATED NODE   READINESS GATES
dashboard-metrics-scraper-6b4884c9d5-2dfsq   0/1     ContainerCreating   0          7s    <none>   k8s-master   <none>           <none>
kubernetes-dashboard-7b544877d5-wfggg        0/1     ContainerCreating   0          9s    <none>   k8s-master   <none>           <none>
[root@k8s-master ~]# kubectl get svc -n kubernetes-dashboard -o wide
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE   SELECTOR
dashboard-metrics-scraper   ClusterIP   10.100.206.239   <none>        8000/TCP        17s   k8s-app=dashboard-metrics-scraper
kubernetes-dashboard        NodePort    10.109.103.129   <none>        443:30009/TCP   19s   k8s-app=kubernetes-dashboard

https://192.168.199.100:30009   # the dashboard serves HTTPS; here it runs on the master node
Create an access account and fetch its token:
[root@k8s-master ~]# kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
serviceaccount/dashboard-admin created
[root@k8s-master ~]# kubectl create clusterrolebinding dashboard-admin-rb --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin-rb created
[root@k8s-master ~]# kubectl describe secrets kubernetes-dashboard-token-pnbhs -n kubernetes-dashboard   # copy the token it prints and paste it into the login page
eyJhbGciOiJSUzI1NiIsImtpZCI6ImRlOTZiSVRYb3BfbnhDcm9rcFBuS0dHT2M5MjVsWEZpRFJuWjVtSXJjNkUifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi1wbmJocyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjQyMDc5YzIxLTRlYjgtNDU5YS05MzkzLTliYThmZjZiMTEwMiIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.LDC2DCUinfO0Cow_40Pzv71SkpSvnKtvm95iqxMc26309FoRpZUsyzq8eV5UW4S7nYmlIrNQ7KVpHpMzGrkbX6x2yh_cz4VgtnhXqDRhv2YjMgKUToZpNyH4AF7zLE-r5XPEVi1uUFhwiNC5ENvrZu546ORMxr8AGa2jmybdev-lzLXCsY9eCCtbX4PHktvYEL3hhE8vQLFG4DdjG3SOieh26CEVw_hf7pdB1M2ImjPJTY7p2VMhwbfyC-dNypL23dC51DJs7GODlUEqpmnCgaeBFy4Hjz438aBCXqNpnh66GG-1Zy5RWba50HWWEBYiDVZgLAq412eW1oQii_HvqQ
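To fetch the token for the dashboard-admin account created above in one step, a sketch (assumes the auto-generated secret name starts with dashboard-admin-token, which holds on clusters older than v1.24):
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin-token | awk '{print $1}')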

6. Dynamic PV

https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner

# Configure NFS
apt install nfs-kernel-server   # on the server
mkdir /nfs/data -p
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
systemctl restart nfs-server

apt install nfs-common   # on each client
showmount -e 10.0.0.10
mkdir /nfs/data -p
mount.nfs 10.0.0.10:/nfs/data /nfs/data
wget https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/archive/refs/tags/nfs-subdir-external-provisioner-4.0.14.tar.gz
tar xf nfs-subdir-external-provisioner-4.0.14.tar.gz
cd nfs-subdir-external-provisioner-4.0.14/deploy
vim deployment.yaml
# edit the NFS server IP and export path inside
kubectl apply -f class.yaml -f rbac.yaml -f deployment.yaml
kubectl apply -f test-claim.yaml -f test-pod.yaml
kubectl delete -f test-pod.yaml   # the pod exits after writing its marker file; delete it once done
# a SUCCESS file in the exported directory means provisioning works
root@k8s-master:~# ll /nfs/data/default-test-claim-pvc-e09cdd52-3cb8-42d7-989c-ba5befa0d67a/
total 8
drwxrwxrwx 2 root root 4096 Oct  9 09:41 ./
drwxrwxrwx 4 root root 4096 Oct  9 09:40 ../
-rw-r--r-- 1 root root    0 Oct  9 09:41 SUCCESS
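With the provisioner in place, any PVC that references its StorageClass is bound automatically. A sketch (assuming the class name defined in class.yaml is nfs-client; confirm with kubectl get storageclass):
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-claim           # hypothetical name
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi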
