• Deploying Prometheus with kube-prometheus

    Download kube-prometheus

    # k8s 1.22 can only use kube-prometheus 0.10 or 0.9
    wget https://github.com/prometheus-operator/kube-prometheus/archive/refs/tags/v0.10.0.tar.gz
    
    # mkdir -p service-monitor prometheus adapter node-exporter kube-state-metrics grafana alertmanager operator other blackbox-exporter
    
    cd /data/bigdata/kube-prometheus-0.10.0/manifests
    
    # List the images referenced by the manifests
    grep -rn 'image: '
    
    # Retag the images and push them to the private Harbor registry
    docker tag quay.io/prometheus/alertmanager:v0.23.0 	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/alertmanager:v0.23.0
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/alertmanager:v0.23.0
    
    docker tag quay.io/prometheus/blackbox-exporter:v0.19.0 	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/blackbox-exporter:v0.19.0
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/blackbox-exporter:v0.19.0
    
    docker tag quay.io/brancz/kube-rbac-proxy:v0.11.0 	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/kube-rbac-proxy:v0.11.0
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/kube-rbac-proxy:v0.11.0
    
    docker tag quay.io/prometheus/node-exporter:v1.3.1 	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/node-exporter:v1.3.1
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/node-exporter:v1.3.1
    
    docker tag quay.io/prometheus/prometheus:v2.32.1 	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/prometheus:v2.32.1
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/prometheus:v2.32.1
    
    docker tag quay.io/prometheus-operator/prometheus-config-reloader:v0.53.1 	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/prometheus-config-reloader:v0.53.1
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/prometheus-config-reloader:v0.53.1
    
    docker tag quay.io/prometheus-operator/prometheus-operator:v0.53.1 	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/prometheus-operator:v0.53.1
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/prometheus-operator:v0.53.1
    
    docker tag bitnami/kube-state-metrics:2.3.0 	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/kube-state-metrics:2.3.0
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/kube-state-metrics:2.3.0
    
    docker tag willdockerhub/prometheus-adapter:v0.9.1 	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/prometheus-adapter:v0.9.1
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/prometheus-adapter:v0.9.1
    
    docker tag grafana/grafana:8.3.3 	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/grafana:8.3.3
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/grafana:8.3.3
    
    docker tag prom/pushgateway:v1.4.3	 bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/pushgateway:v1.4.3
    docker push bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/pushgateway:v1.4.3
    
    
    # Replace the registries in the manifests with the private Harbor registry
    sed -i 's/quay.io/bigdata-uat-k8s-harbor.ky-tech.com.cn\/bigdata/g' alertmanager-alertmanager.yaml
    sed -i 's/quay.io/bigdata-uat-k8s-harbor.ky-tech.com.cn\/bigdata/g' blackboxExporter-deployment.yaml
    sed -i 's/quay.io\/brancz/bigdata-uat-k8s-harbor.ky-tech.com.cn\/bigdata\/prometheus/g' kubeStateMetrics-deployment.yaml
    sed -i 's/quay.io/bigdata-uat-k8s-harbor.ky-tech.com.cn\/bigdata/g' nodeExporter-daemonset.yaml
    sed -i 's/quay.io/bigdata-uat-k8s-harbor.ky-tech.com.cn\/bigdata/g' prometheus-prometheus.yaml
    sed -i 's/quay.io\/prometheus-operator/bigdata-uat-k8s-harbor.ky-tech.com.cn\/bigdata\/prometheus/g' prometheusOperator-deployment.yaml
    
    # The following image references still need to be edited by hand:
    # blackboxExporter-deployment.yaml:77:        image: bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/brancz/kube-rbac-proxy:v0.11.0
    
    # kubeStateMetrics-deployment.yaml:34:        image: k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.3.0
    # prometheusAdapter-deployment.yaml:39:        image: k8s.gcr.io/prometheus-adapter/prometheus-adapter:v0.9.1
    # grafana-deployment.yaml:32:        image: grafana/grafana:8.3.3
    # blackboxExporter-deployment.yaml:54:        image: jimmidyson/configmap-reload:v0.5.0
    
    
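    After the sed rewrites, it is worth confirming that no manifest still references a public registry. A minimal check, assuming you are still in the manifests directory and have already hand-edited the images listed above:

    # Any line printed here still points at a public registry and needs another pass
    grep -rn 'image: ' . | grep -v 'bigdata-uat-k8s-harbor.ky-tech.com.cn'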

    Change the Service type to NodePort

    cd /data/bigdata/kube-prometheus-0.10.0/manifests
    vim prometheus-service.yaml
    
    spec:
      type: NodePort # added
      ports:
      - name: web
        port: 9090
        targetPort: web
        nodePort: 30090 # added
      - name: reloader-web
        port: 8080
        targetPort: reloader-web
    
    
    vim alertmanager-service.yaml
    
    spec:
      type: NodePort # added
      ports:
      - name: web
        port: 9093
        targetPort: web
        nodePort: 30093 # added
      - name: reloader-web
        port: 8080
        targetPort: reloader-web
    
    
    vim grafana-service.yaml
    
    spec:
      type: NodePort # added
      ports:
      - name: http
        port: 3000
        targetPort: http
        nodePort: 30300 # added; NodePort range is 30000-32767
    
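    With the three Services switched to NodePort, a quick sanity check confirms the ports were assigned; this is only a sketch, and <NODE_IP> is a placeholder for any worker node's address:

    kubectl -n monitoring get svc prometheus-k8s alertmanager-main grafana

    # Prometheus and Alertmanager both expose a /-/healthy endpoint
    curl -s http://<NODE_IP>:30090/-/healthy
    curl -s http://<NODE_IP>:30093/-/healthy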

    Integrate Pushgateway into kube-prometheus

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      labels:
        app.kubernetes.io/name: pushgateway
      name: pushgateway
      namespace: monitoring
    spec:
      replicas: 1
      selector:
        matchLabels:
          app.kubernetes.io/name: pushgateway
      template:
        metadata:
          labels:
            app.kubernetes.io/name: pushgateway
        spec:
          nodeSelector:
            kubernetes.io/os: linux
          containers:
          - image: bigdata-uat-k8s-harbor.ky-tech.com.cn/bigdata/prometheus/pushgateway:v1.4.3
            name: pushgateway
            ports:
            - containerPort: 9091
              name: http
            resources:
              limits:
                cpu: 200m
                memory: 512Mi
              requests:
                cpu: 50m
                memory: 128Mi
          restartPolicy: Always
    ---
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app.kubernetes.io/name: pushgateway
      name: pushgateway
      namespace: monitoring
    spec:
      type: NodePort
      ports:
      - name: http
        port: 9091
        targetPort: http
        nodePort: 30391 # added
      selector:
        app.kubernetes.io/name: pushgateway
    ---
    apiVersion: monitoring.coreos.com/v1
    kind: ServiceMonitor
    metadata:
      labels:
        app.kubernetes.io/name: pushgateway
      name: pushgateway
      namespace: monitoring
    spec:
      endpoints:
      - interval: 30s
        path: /metrics
        port: http
        scheme: http
      selector:
        matchLabels:
          app.kubernetes.io/name: pushgateway
    
    
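    After applying the manifest above, a short push/read-back round trip confirms both the NodePort and the scrape wiring. A sketch, assuming the manifest was saved as pushgateway.yaml and <NODE_IP> is any node's address:

    kubectl apply -f pushgateway.yaml

    # Push a test metric for job "demo_job", then read it back from the Pushgateway
    echo "demo_metric 42" | curl --data-binary @- http://<NODE_IP>:30391/metrics/job/demo_job
    curl -s http://<NODE_IP>:30391/metrics | grep demo_metric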

    Modify the Grafana configuration

    vim grafana-config.yaml
    # Enable anonymous access and store Grafana data in MySQL
    # The password does not need to be base64-encoded
    
      grafana.ini: |
        [security]
        allow_embedding = true
        [date_formats]
        default_timezone = UTC
        [auth.anonymous]
        enabled = true
        [database]
        type = mysql
        host = mysql-master.mysql:3306
        name = grafana
        user = grafana
        password = YQZi5UUw825h
    type: Opaque
    
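    The MySQL backend only works if the grafana database and account already exist; Grafana creates its tables on first start but not the database itself. A sketch of the preparation and rollout, assuming the mysql client can reach mysql-master.mysql with an administrative account and the credentials match the config above:

    # Create the database and the account Grafana will use
    mysql -h mysql-master.mysql -uroot -p -e "CREATE DATABASE IF NOT EXISTS grafana DEFAULT CHARACTER SET utf8mb4;"
    mysql -h mysql-master.mysql -uroot -p -e "CREATE USER IF NOT EXISTS 'grafana'@'%' IDENTIFIED BY 'YQZi5UUw825h'; GRANT ALL ON grafana.* TO 'grafana'@'%';"

    # Apply the new config and restart Grafana so it picks it up
    kubectl apply -f grafana-config.yaml
    kubectl -n monitoring rollout restart deployment grafana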

    Prometheus persistent storage

    vim sc.yaml
    
    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: prometheus-local-storage
    provisioner: kubernetes.io/no-provisioner # no dynamic provisioning; PVs are created by hand
    volumeBindingMode: WaitForFirstConsumer  # the PVC is only bound once a pod references it
    reclaimPolicy: Retain   # data is kept after the PVC is deleted
    
    vim pv.yaml
    
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: prometheus-pv-1
    spec:
      storageClassName: prometheus-local-storage
      capacity:
        storage: 10Gi
      accessModes:
        - ReadWriteOnce
      local:
        path: /data/zxl/prometheus1/  # create this directory on the node
      nodeAffinity:
        required:
          nodeSelectorTerms:
            - matchExpressions:
                - key: kubernetes.io/hostname
                  operator: In
                  values:
                    - szzb-bg-dev-etl-4  # target node
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: prometheus-pv-2
    spec:
      storageClassName: prometheus-local-storage
      capacity:
        storage: 10Gi
      accessModes:
        - ReadWriteOnce
      local:
        path: /data/zxl/prometheus2/  # create this directory on the node
      nodeAffinity:
        required:
          nodeSelectorTerms:
            - matchExpressions:
                - key: kubernetes.io/hostname
                  operator: In
                  values:
                    - szzb-bg-dev-etl-5  # target node
    
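    Local PVs do not create their backing directories, so the paths referenced above must exist on the named nodes before the Prometheus pods are scheduled. A sketch of the preparation, assuming ssh access to both nodes:

    # Create the backing directories on the nodes referenced by the PVs
    ssh szzb-bg-dev-etl-4 'mkdir -p /data/zxl/prometheus1'
    ssh szzb-bg-dev-etl-5 'mkdir -p /data/zxl/prometheus2'

    kubectl apply -f sc.yaml -f pv.yaml
    kubectl get pv   # both PVs stay Available until Prometheus claims them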
    # Append the following at the end of the Prometheus spec
    vim prometheus-prometheus.yaml
    
      retention: 15d
      storage:
        volumeClaimTemplate:
          spec:
            storageClassName: prometheus-local-storage
            resources:
              requests:
                storage: 10Gi
    
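    Once the prometheus-k8s pods are recreated, the Operator generates one PVC per replica from the volumeClaimTemplate. A quick check, assuming the Prometheus custom resource keeps its default name k8s:

    kubectl -n monitoring get pvc    # one PVC per replica, bound to prometheus-pv-1 / prometheus-pv-2
    kubectl -n monitoring get prometheus k8s -o jsonpath='{.spec.retention}{"\n"}'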

    Configure Ingress

    vim ingress.yaml
    
    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: prometheus-ingress
      namespace: monitoring
      annotations:
        kubernetes.io/ingress.class: "nginx"
        prometheus.io/http_probe: "true"
    spec:
      rules:
      - host: alert.k8s.com
        http:
          paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: alertmanager-main
                port:
                  number: 9093
      - host: bigdata-uat-k8s-grafana.ky-tech.com.cn 
        http:
          paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: grafana
                port:
                  number: 3000
      - host: bigdata-uat-k8s-prometheus.ky-tech.com.cn
        http:
          paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: prometheus-k8s
                port:
                  number: 9090
      tls:
      - hosts:
        - bigdata-uat-k8s-grafana.ky-tech.com.cn  # HTTPS host
        secretName: grafana-secret
      - hosts:
        - bigdata-uat-k8s-prometheus.ky-tech.com.cn  # HTTPS host
        secretName: prometheus-secret
    
    kubectl create secret tls grafana-secret --key /data/k8s-install/ssl/ky-tech.com.cn_nginx/ky-tech.com.cn.key --cert /data/k8s-install/ssl/ky-tech.com.cn_nginx/ky-tech.com.cn_bundle.crt -n monitoring
    
    kubectl create secret tls prometheus-secret --key /data/k8s-install/ssl/ky-tech.com.cn_nginx/ky-tech.com.cn.key --cert /data/k8s-install/ssl/ky-tech.com.cn_nginx/ky-tech.com.cn_bundle.crt -n monitoring
    
    # Apply the changes (-R recurses into the subdirectories created earlier)
    kubectl apply -R -f .

    # If needed, delete the prometheus-k8s-0 and prometheus-k8s-1 pods so they are recreated with the new configuration

    # kubectl get ing -n monitoring   # list the Ingress resources
    
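    To verify the Ingress and TLS wiring without touching DNS, curl can pin the hostnames to the ingress controller. A sketch; <INGRESS_IP> stands for the controller's node or LoadBalancer address:

    kubectl -n monitoring get ing prometheus-ingress

    # -k skips certificate verification in case the chain is not trusted locally
    curl -k --resolve bigdata-uat-k8s-grafana.ky-tech.com.cn:443:<INGRESS_IP> https://bigdata-uat-k8s-grafana.ky-tech.com.cn/api/health
    curl -k --resolve bigdata-uat-k8s-prometheus.ky-tech.com.cn:443:<INGRESS_IP> https://bigdata-uat-k8s-prometheus.ky-tech.com.cn/-/healthy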

    Deploy

    # Deploy
    kubectl apply --server-side -f manifests/setup
    kubectl wait \
    	--for condition=Established \
    	--all CustomResourceDefinition \
    	--namespace=monitoring
    kubectl apply -f manifests/
    
    
    # Uninstall
    kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup
    
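    After the apply it can take a minute or two for everything to come up; a quick way to watch the rollout:

    kubectl -n monitoring get pods -o wide
    # Block until every deployment in the namespace reports Available (5 minute timeout)
    kubectl -n monitoring wait --for=condition=Available deployment --all --timeout=300s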

    Monitoring nodes outside the k8s cluster

    ---
    apiVersion: monitoring.coreos.com/v1
    kind: ServiceMonitor
    metadata:
      labels:
        app.kubernetes.io/component: exporter
        app.kubernetes.io/name: nginx-exporter
        app.kubernetes.io/app: nginx
      name: nginx
      namespace: monitoring
    spec:
      endpoints:
      - interval: 30s
        port: metrics
      selector:
        matchLabels:
          app.kubernetes.io/component: exporter
          app.kubernetes.io/name: nginx-exporter
          app.kubernetes.io/app: nginx
      namespaceSelector:
          matchNames:
          - monitoring
    ---
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: nginx-exporter
        app.kubernetes.io/component: exporter
        app.kubernetes.io/name: nginx-exporter
        app.kubernetes.io/app: nginx
      name: nginx
      namespace: monitoring
    spec:
      type: ClusterIP
      clusterIP: None
      ports:
      - name: metrics
        port: 9113
        targetPort: 9113
        protocol: TCP
    ---
    apiVersion: v1
    kind: Endpoints
    metadata:
      labels:
        app.kubernetes.io/component: exporter
        app.kubernetes.io/name: nginx-exporter
        app.kubernetes.io/app: nginx
      name: nginx
      namespace: monitoring
    subsets:
      - addresses:
        - ip: 192.168.26.11
        - ip: 192.168.26.13
        ports:
        - name: metrics
          port: 9113
          protocol: TCP
    
    
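    Once these three objects are applied, the two external nginx-exporter instances should show up as targets of the nginx ServiceMonitor. A quick check, assuming the exporter is already listening on port 9113 on both hosts:

    # The exporter endpoints answer directly
    curl -s http://192.168.26.11:9113/metrics | head

    # The headless Service and the hand-written Endpoints object line up
    kubectl -n monitoring get endpoints nginx

    # The new targets then appear under Status -> Targets in the Prometheus UI (NodePort 30090)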

    Two ways for kube-prometheus to monitor services outside the K8s cluster

  • Original article: https://blog.csdn.net/Shyllin/article/details/127674299