• Kubernetes Cluster Orchestration (6)


    Contents

    k8s scheduling

    nodename

    nodeselector

    nodeaffinity

    podaffinity

    podantiaffinity

    Taints

    cordon, drain, delete


    k8s scheduling

    nodename

    [root@k8s2 node]# vim nodename.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
      nodeName: k8s3
    [root@k8s2 node]# kubectl apply -f nodename.yaml

    [root@k8s2 node]# kubectl delete -f nodename.yaml
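
    nodeName bypasses the scheduler entirely: the Pod is bound directly to the named node, and if that node does not exist or cannot run the Pod, it fails instead of being rescheduled. A quick check of where the Pod landed, assuming the lab nodes above:

    [root@k8s2 node]# kubectl get pod nginx -o wide
    # The NODE column should show k8s3, since nodeName binds the Pod to that
    # node without going through the scheduler.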

    nodeselector

    nodeSelector is Kubernetes' simplest node-selection mechanism. It is a label selector: the scheduler compares the key/value pairs defined in the selector against each node's labels and only places the Pod on nodes that match all of them.

    [root@k8s2 node]# vim nodeselector.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
      nodeSelector:
        disktype: ssd
    [root@k8s2 node]# kubectl label nodes k8s4 disktype=ssd
    [root@k8s2 node]# kubectl label nodes k8s3 disktype=ssd
    [root@k8s2 node]# kubectl apply -f nodeselector.yaml
    [root@k8s2 node]# kubectl delete -f nodeselector.yaml
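
    Since only nodes carrying the label are candidates, it is worth confirming which nodes actually have disktype=ssd and where the Pod ended up. Keep the label in place for the nodeaffinity example below; a trailing minus after the key would remove it again later:

    [root@k8s2 node]# kubectl get nodes -l disktype=ssd
    [root@k8s2 node]# kubectl get pod nginx -o wide
    # "kubectl label nodes k8s3 disktype-" removes the label when it is no longer needed.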

    nodeaffinity

    [root@k8s2 node]# vim nodeaffinity.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: node-affinity
    spec:
      containers:
      - name: nginx
        image: nginx
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: disktype
                operator: In
                values:
                - ssd
                - fc
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 1
            preference:
              matchExpressions:
              - key: kubernetes.io/hostname
                operator: NotIn
                values:
                - k8s3
    [root@k8s2 node]# kubectl apply -f nodeaffinity.yaml
    [root@k8s2 node]# kubectl describe pod node-affinity
    [root@k8s2 node]# kubectl delete -f nodeaffinity.yaml
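
    In this manifest the required rule is a hard constraint (the Pod only schedules onto a node whose disktype is ssd or fc), while the preferred rule is a soft one: the weight is added to a node's score when the expression matches, so here it merely nudges the Pod away from k8s3. Besides the describe output above, the chosen node can be read directly:

    [root@k8s2 node]# kubectl get pod node-affinity -o wide
    # Expected: the NODE column shows a labelled node, with k8s4 favoured over
    # k8s3 because of the NotIn preference on kubernetes.io/hostname.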

    podaffinity

    [root@k8s2 node]# vim podaffinity.yaml
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-deployment
      labels:
        app: nginx
    spec:
      replicas: 3
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: nginx
          affinity:
            podAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
              - labelSelector:
                  matchExpressions:
                  - key: app
                    operator: In
                    values:
                    - nginx
                topologyKey: "kubernetes.io/hostname"
    [root@k8s2 node]# kubectl apply -f podaffinity.yaml
    [root@k8s2 node]# kubectl delete -f podaffinity.yaml
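
    With a required podAffinity rule and topologyKey kubernetes.io/hostname, each replica must run on a node that hosts another app=nginx Pod, so in this lab the replicas end up co-located on one node. A quick way to see that:

    [root@k8s2 node]# kubectl get pod -l app=nginx -o wide
    # All three replicas should share the same NODE, because every Pod is
    # required to run where another app=nginx Pod already runs.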

    podantiaffinity

    [root@k8s2 node]# vim podantiaffinity.yaml
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-deployment
      labels:
        app: nginx
    spec:
      replicas: 3
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: nginx
          affinity:
            podAntiAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
              - labelSelector:
                  matchExpressions:
                  - key: app
                    operator: In
                    values:
                    - nginx
                topologyKey: "kubernetes.io/hostname"
    [root@k8s2 node]# kubectl apply -f podantiaffinity.yaml
    [root@k8s2 node]# kubectl delete -f podantiaffinity.yaml
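
    podAntiAffinity inverts the rule: with the same selector and topologyKey, no two app=nginx Pods may share a node, so the replicas are spread one per schedulable node. If there are fewer schedulable nodes than replicas (for example, only k8s3 and k8s4 as workers), the surplus replica stays Pending:

    [root@k8s2 node]# kubectl get pod -l app=nginx -o wide
    # Each running replica sits on a different NODE; with only two schedulable
    # worker nodes the third replica remains Pending.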

    Taints

    [root@k8s2 node]# vim taint.yaml
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      labels:
        app: web
      name: web
    spec:
      replicas: 3
      selector:
        matchLabels:
          app: web
      template:
        metadata:
          labels:
            app: web
        spec:
          containers:
          - image: nginx
            name: nginx
    [root@k8s2 node]# kubectl apply -f taint.yaml

    Set a taint (the NoSchedule effect keeps new Pods without a matching toleration off the node; Pods already running there are unaffected):

    [root@k8s2 pod]# kubectl taint node k8s3 k1=v1:NoSchedule
    [root@k8s2 pod]# kubectl describe nodes k8s3 | grep Taints

    [root@k8s2 pod]# kubectl scale deployment web --replicas 6
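
    With the NoSchedule taint in place, the Pods that were already running on k8s3 keep running, but the replicas added by the scale-up can only be placed on untainted nodes (here, k8s4). Checking the placement makes the effect visible:

    [root@k8s2 pod]# kubectl get pod -l app=web -o wide
    # New replicas avoid k8s3; existing Pods on k8s3 stay where they are,
    # since NoSchedule does not evict anything.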
    

    [root@k8s2 pod]# kubectl taint node k8s3 k1=v1:NoExecute
    [root@k8s2 pod]# kubectl describe nodes k8s3 | grep Taints
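
    NoExecute is stronger than NoSchedule: besides keeping new Pods away, it evicts Pods already running on the node that have no matching toleration, so the web replicas that were on k8s3 are recreated elsewhere by the Deployment:

    [root@k8s2 pod]# kubectl get pod -l app=web -o wide
    # No web Pod should remain on k8s3 after the eviction.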

    Set tolerations (this toleration only matches taints whose effect is NoSchedule):

    [root@k8s2 node]# vim taint.yaml
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      labels:
        app: web
      name: web
    spec:
      replicas: 6
      selector:
        matchLabels:
          app: web
      template:
        metadata:
          labels:
            app: web
        spec:
          tolerations:
          - operator: Exists
            effect: NoSchedule
          containers:
          - image: nginx
            name: nginx
    [root@k8s2 node]# kubectl apply -f taint.yaml
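
    Because taints are tracked per key and effect, k8s3 still carries the NoExecute taint set earlier, and a toleration limited to effect NoSchedule does not cover it, so the Pods are still kept off k8s3. The point of the example is that a toleration must match the taint's effect, or omit the effect entirely as in the next manifest:

    [root@k8s2 node]# kubectl get pod -l app=web -o wide
    # Still no Pods on k8s3: the NoExecute taint is not tolerated here.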

    Tolerate all taints (operator: Exists with no effect specified matches every taint):

    [root@k8s2 node]# vim taint.yaml
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      labels:
        app: web
      name: web
    spec:
      replicas: 6
      selector:
        matchLabels:
          app: web
      template:
        metadata:
          labels:
            app: web
        spec:
          tolerations:
          - operator: Exists
          containers:
          - image: nginx
            name: nginx
    [root@k8s2 node]# kubectl apply -f taint.yaml

    Clean up and remove the taint

    [root@k8s2 node]# kubectl delete -f taint.yaml
    [root@k8s2 node]# kubectl taint node k8s3 k1-
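
    The trailing minus removes every taint whose key is k1, regardless of effect; verifying afterwards should show no taints left on the node:

    [root@k8s2 node]# kubectl describe nodes k8s3 | grep Taints
    # Expected once the key is removed:
    # Taints:             <none>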

    cordon, drain, delete

    [root@k8s2 node]# kubectl create deployment demo --image nginx --replicas 3
    [root@k8s2 node]# kubectl cordon k8s3
    [root@k8s2 node]# kubectl get node

    [root@k8s2 node]# kubectl scale deployment demo --replicas 6
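
    cordon marks the node SchedulingDisabled: Pods already on k8s3 keep running, but the replicas added by the scale-up go only to the remaining schedulable nodes, which is easy to confirm:

    [root@k8s2 node]# kubectl get pod -l app=demo -o wide
    # None of the new demo replicas appear on k8s3 while it is cordoned.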
    

    [root@k8s2 node]# kubectl drain k8s3 --ignore-daemonsets
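
    drain goes a step further than cordon: it cordons the node (if it is not already) and then evicts the Pods running on it, so afterwards only DaemonSet Pods remain on k8s3 and the demo replicas are recreated on other nodes. The node can be brought back with uncordon, or removed entirely as in the next block:

    [root@k8s2 node]# kubectl get pod -o wide
    [root@k8s2 node]# kubectl get node
    # k8s3 shows STATUS Ready,SchedulingDisabled until it is uncordoned:
    # kubectl uncordon k8s3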
    

    [root@k8s2 node]# kubectl delete nodes k8s3
    [root@k8s2 node]# kubectl get node
    # On k8s3, restart the kubelet service so the node re-registers and rejoins the cluster
    [root@k8s3 ~]# systemctl restart kubelet
    [root@k8s2 node]# kubectl get node

  • Original article: https://blog.csdn.net/m0_64028800/article/details/134277085