• kubernetes集群编排——k8s资源监控


    资源限制

    上传镜像

    [root@k8s2 limit]# vim limit.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: memory-demo
    spec:
      containers:
      - name: memory-demo
        image: stress
        args:
        - --vm
        - "1"
        - --vm-bytes
        - 200M
        resources:
          requests:
            memory: 50Mi
          limits:
            memory: 100Mi
    1. [root@k8s2 limit]# kubectl apply -f limit.yaml
    2. [root@k8s2 limit]# kubectl get pod

    limitrange

    [root@k8s2 limit]# vim range.yaml
    apiVersion: v1
    kind: LimitRange
    metadata:
      name: limitrange-memory
    spec:
      limits:
      - default:
          cpu: 0.5
          memory: 512Mi
        defaultRequest:
          cpu: 0.1
          memory: 256Mi
        max:
          cpu: 1
          memory: 1Gi
        min:
          cpu: 0.1
          memory: 100Mi
        type: Container
    1. [root@k8s2 limit]# kubectl apply -f range.yaml
    2. [root@k8s2 limit]# kubectl describe limitranges

    创建的pod自动添加限制

    1. [root@k8s2 limit]# kubectl run demo --image nginx
    2. [root@k8s2 limit]# kubectl describe pod demo

    自定义限制的pod也需要在limitrange定义的区间内

    [root@k8s2 limit]# kubectl apply -f limit.yaml

    ResourceQuota

    [root@k8s2 limit]# vim quota.yaml
    apiVersion: v1
    kind: ResourceQuota
    metadata:
      name: mem-cpu-demo
    spec:
      hard:
        requests.cpu: "2"
        requests.memory: 2Gi
        limits.cpu: "2"
        limits.memory: 2Gi
        pods: "3"
    1. [root@k8s2 limit]# kubectl apply -f quota.yaml
    2. [root@k8s2 limit]# kubectl describe resourcequotas

    1. 配额是针对namespace施加的总限额,命名空间内的所有pod资源总和不能超过此配额
    2. 创建的pod必须定义资源限制

    metrics-server

    官网:https://github.com/kubernetes-sigs/metrics-server

    下载部署文件

    [root@k8s2 metrics]# wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml

    修改部署文件

    [root@k8s2 metrics]# vim components.yaml
    1. - --kubelet-insecure-tls
    2. image: metrics-server/metrics-server:v0.6.4

    上传镜像到harbor

    1. [root@k8s2 metrics]# kubectl apply -f components.yaml
    2. [root@k8s2 metrics]# kubectl -n kube-system get pod

    如有问题,可以查看日志

    [root@k8s2 metrics]# kubectl -n kube-system logs metrics-server-5d54764497-7wmjg

    1. [root@k8s2 metrics]# kubectl top node
    2. [root@k8s2 metrics]# kubectl top pod -A --sort-by cpu

    dashboard

    官网:https://github.com/kubernetes/dashboard

    下载部署文件

    [root@k8s2 dashboard]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml

    上传所需镜像到harbor

    部署

    [root@k8s2 dashboard]# kubectl apply -f recommended.yaml

    修改svc

    [root@k8s2 dashboard]# kubectl -n kubernetes-dashboard edit svc kubernetes-dashboard

    [root@k8s2 dashboard]# kubectl -n kubernetes-dashboard get svc

    集群需要部署metallb-system,如果没有可以使用NodePort方式

    访问:https://192.168.92.101

    授权

    [root@k8s2 dashboard]# vim rbac.yaml
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: admin-user
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: cluster-admin
    subjects:
    - kind: ServiceAccount
      name: kubernetes-dashboard
      namespace: kubernetes-dashboard
    [root@k8s2 dashboard]# kubectl apply -f rbac.yaml

    获取token

    [root@k8s2 dashboard]# kubectl -n kubernetes-dashboard create token kubernetes-dashboard

    使用token登录网页

    k9s

    解压

    [root@k8s2 ~]# tar zxf k9s_Linux_amd64.tar.gz

    部署

    [root@k8s2 ~]# mv k9s /usr/bin

    启动

    [root@k8s2 ~]# k9s

    hpa

    官网:https://kubernetes.io/zh-cn/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/

    上传镜像

    [root@k8s2 hpa]# vim hpa.yaml
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: php-apache
    spec:
      selector:
        matchLabels:
          run: php-apache
      replicas: 1
      template:
        metadata:
          labels:
            run: php-apache
        spec:
          containers:
          - name: php-apache
            image: hpa-example
            ports:
            - containerPort: 80
            resources:
              limits:
                cpu: 500m
              requests:
                cpu: 200m
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: php-apache
      labels:
        run: php-apache
    spec:
      ports:
      - port: 80
      selector:
        run: php-apache
    1. [root@k8s2 hpa]# kubectl apply -f hpa.yaml
    2. [root@k8s2 hpa]# kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
    3. [root@k8s2 hpa]# kubectl get hpa

    压测

    [root@k8s2 hpa]# kubectl run -i --tty load-generator --rm --image=busybox --restart=Never -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://php-apache; done"

    pod负载上升

    1. [root@k8s2 ~]# watch -n1 kubectl top pod
    2. [root@k8s2 ~]# kubectl get hpa

    触发hpa扩容pod

    结束压测后,默认等待5分钟冷却时间,pod会被自动回收

    多项量度指标

    [root@k8s2 hpa]# kubectl get hpa php-apache -o yaml > hpa-v2.yaml

    修改文件,增加内存指标

    [root@k8s2 hpa]# vim hpa-v2.yaml
    - resource:
        name: memory
        target:
          averageValue: 50Mi
          type: AverageValue
      type: Resource

    1. [root@k8s2 hpa]# kubectl apply -f hpa-v2.yaml
    2. [root@k8s2 hpa]# kubectl get hpa

  • 相关阅读:
    服务器租用安全么?
    Educational Codeforces Round 155 (Rated for Div. 2)
    简单讲解RabbitMQ
    面试总结 - 计算机网络
    游戏引擎中网络游戏的基础
    C++的在vs上面用ffmpeg做音频流捕捉的代码
    selenium打开火狐浏览器
    基于强化学习的自动化红队测试计划构建与验证
    B树的插入和删除
    工程物料管理信息化建设(十二)——关于工程物料管理系统最后的思考
  • 原文地址:https://blog.csdn.net/dgffd/article/details/134300006