Prometheus was originally an open-source monitoring and alerting system built at SoundCloud, essentially an open-source take on Google's internal BorgMon monitoring system. In 2016 Prometheus joined the CNCF, becoming the second project hosted there after Kubernetes, and as Kubernetes cemented its lead in container orchestration, Prometheus became the de facto standard for monitoring Kubernetes containers.
For a deeper introduction to Prometheus, see my earlier article: Prometheus原理详解.
Chart address: https://artifacthub.io/packages/helm/prometheus-community/prometheus
# Add the repo
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update prometheus-community
helm search repo prometheus-community/prometheus
# Pull the chart package
helm pull prometheus-community/prometheus
# Unpack it
tar -xf prometheus-15.12.2.tgz
grep -A3 'image:' prometheus/values.yaml
search → pull → tag → push
### 1. alertmanager
docker search alertmanager
docker pull quay.io/prometheus/alertmanager:v0.24.0
docker tag quay.io/prometheus/alertmanager myharbor.com/monitoring/alertmanager:v0.24.0
docker push myharbor.com/monitoring/alertmanager:v0.24.0
### 2. configmap-reload
docker search configmap-reload
docker pull jimmidyson/configmap-reload:v0.5.0
docker tag jimmidyson/configmap-reload:v0.5.0 myharbor.com/monitoring/configmap-reload:v0.5.0
docker push myharbor.com/monitoring/configmap-reload:v0.5.0
### 3. node-exporter
docker search node-exporter
docker pull quay.io/prometheus/node-exporter:v1.3.1
docker tag quay.io/prometheus/node-exporter:v1.3.1 myharbor.com/monitoring/node-exporter:v1.3.1
docker push myharbor.com/monitoring/node-exporter:v1.3.1
### 4. prometheus
docker search prometheus
docker pull quay.io/prometheus/prometheus:v2.36.2
docker tag quay.io/prometheus/prometheus:v2.36.2 myharbor.com/monitoring/prometheus:v2.36.2
docker push myharbor.com/monitoring/prometheus:v2.36.2
### 5. pushgateway
docker search pushgateway
docker pull prom/pushgateway:v1.4.3
docker tag prom/pushgateway:v1.4.3 myharbor.com/monitoring/pushgateway:v1.4.3
docker push myharbor.com/monitoring/pushgateway:v1.4.3
### 6. kube-state-metrics
# The image reference lives in charts/kube-state-metrics/values.yaml
docker pull bitnami/kube-state-metrics
docker tag bitnami/kube-state-metrics:latest myharbor.com/monitoring/kube-state-metrics:latest
docker push myharbor.com/monitoring/kube-state-metrics:latest
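All six images follow the same pattern, so the mirroring can also be scripted. A minimal sketch, assuming the docker CLI is already logged in to myharbor.com and the upstream tags listed above are available:
# Mirror each upstream image into the private Harbor registry
for src in \
  quay.io/prometheus/alertmanager:v0.24.0 \
  jimmidyson/configmap-reload:v0.5.0 \
  quay.io/prometheus/node-exporter:v1.3.1 \
  quay.io/prometheus/prometheus:v2.36.2 \
  prom/pushgateway:v1.4.3 \
  bitnami/kube-state-metrics:latest
do
  dst="myharbor.com/monitoring/${src##*/}"  # strip registry/org prefix, keep image:tag
  docker pull "$src" && docker tag "$src" "$dst" && docker push "$dst"
done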
Modify the image references in values.yaml and charts/kube-state-metrics/values.yaml to point at the private registry.
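After the edit, the image sections should point at the private registry. A sketch of the relevant fragments, assuming the repository/tag layout this chart normally uses (verify the exact keys against your chart version):
server:
  image:
    repository: myharbor.com/monitoring/prometheus
    tag: v2.36.2
alertmanager:
  image:
    repository: myharbor.com/monitoring/alertmanager
    tag: v0.24.0
# charts/kube-state-metrics/values.yaml
image:
  repository: myharbor.com/monitoring/kube-state-metrics
  tag: latest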
# Tip: append --dry-run --debug first to preview the rendered manifests
helm install prometheus ./ \
-n prometheus \
--create-namespace \
--set server.ingress.enabled=true \
--set server.ingress.hosts='{prometheus.k8s.local}' \
--set server.ingress.paths='{/}' \
--set server.ingress.pathType=Prefix \
--set alertmanager.ingress.enabled=true \
--set alertmanager.ingress.hosts='{alertmanager.k8s.local}' \
--set alertmanager.ingress.paths='{/}' \
--set alertmanager.ingress.pathType=Prefix \
--set grafana.ingress.enabled=true \
--set grafana.ingress.hosts='{grafana.k8s.local}' \
--set grafana.ingress.paths='{/}' \
--set grafana.ingress.pathType=Prefix
NOTES
NAME: prometheus
LAST DEPLOYED: Sat Sep 17 10:06:04 2022
NAMESPACE: prometheus
STATUS: deployed
REVISION: 1
TEST SUITE: None
NOTES:
The Prometheus server can be accessed via port 80 on the following DNS name from within your cluster:
prometheus-server.prometheus.svc.cluster.local
Get the Prometheus server URL by running these commands in the same shell:
export POD_NAME=$(kubectl get pods --namespace prometheus -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace prometheus port-forward $POD_NAME 9090
The Prometheus alertmanager can be accessed via port 80 on the following DNS name from within your cluster:
prometheus-alertmanager.prometheus.svc.cluster.local
From outside the cluster, the alertmanager URL(s) are:
http://alertmanager.k8s.local
#################################################################################
###### WARNING: Pod Security Policy has been moved to a global property. #####
###### use .Values.podSecurityPolicy.enabled with pod-based #####
###### annotations #####
###### (e.g. .Values.nodeExporter.podSecurityPolicy.annotations) #####
#################################################################################
The Prometheus PushGateway can be accessed via port 9091 on the following DNS name from within your cluster:
prometheus-pushgateway.prometheus.svc.cluster.local
Get the PushGateway URL by running these commands in the same shell:
export POD_NAME=$(kubectl get pods --namespace prometheus -l "app=prometheus,component=pushgateway" -o jsonpath="{.items[0].metadata.name}")
kubectl --namespace prometheus port-forward $POD_NAME 9091
For more information on running Prometheus, visit:
https://prometheus.io/
Check:
kubectl get pods,svc,ingress -n prometheus
Web access:
prometheus: http://prometheus.k8s.local/
alertmanager: http://alertmanager.k8s.local/
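These are private hostnames, so they must resolve to your ingress controller before the URLs above work. A hosts-file sketch, where 192.168.1.100 is a placeholder for your ingress node IP:
# /etc/hosts on the client machine (the IP below is an assumption; use your own)
192.168.1.100  prometheus.k8s.local alertmanager.k8s.local grafana.k8s.local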
Next, generate a self-signed certificate so the ingresses can be served over HTTPS:
cd /opt/k8s/prometheus/artifacthub/prometheus
mkdir tls ; cd tls
# Generate the CA private key
openssl genrsa -out ca.key 4096
# Generate the CA certificate
openssl req -x509 -new -nodes -sha512 -days 3650 \
-subj "/C=CN/ST=Guangdong/L=Shenzhen/O=k8s.local/OU=k8s.local/CN=k8s.local" \
-key ca.key \
-out ca.crt
# Create the domain certificate: first generate its private key
openssl genrsa -out k8s.local.key 4096
# Generate the certificate signing request (CSR)
openssl req -sha512 -new \
-subj "/C=CN/ST=Guangdong/L=Shenzhen/O=k8s.local/OU=k8s.local/CN=k8s.local" \
-key k8s.local.key \
-out k8s.local.csr
# Generate an x509 v3 extension file
cat > v3.ext <<-EOF
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1=k8s.local
DNS.2=*.k8s.local
EOF
# Sign the k8s.local certificate with the CA
openssl x509 -req -sha512 -days 3650 \
-extfile v3.ext \
-CA ca.crt -CAkey ca.key -CAcreateserial \
-in k8s.local.csr \
-out k8s.local.crt
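Before wiring the certificate into the chart, it is worth confirming the wildcard SAN was actually applied:
# Expect to see DNS:k8s.local, DNS:*.k8s.local in the output
openssl x509 -in k8s.local.crt -noout -text | grep -A1 'Subject Alternative Name'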
Edit values.yaml to enable TLS on both ingresses and declare the certificate files:
alertmanager:
  ...
  ingress:
    ...
    tls:
      - secretName: prometheus-alerts-tls
        hosts:
          - alertmanager.k8s.local
...
server:
  ...
  ingress:
    ...
    tls:
      - secretName: prometheus-alerts-tls
        hosts:
          - prometheus.k8s.local
...
secrets:
  - name: prometheus-alerts-tls
    cert: tls/k8s.local.crt
    key: tls/k8s.local.key
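The chart does not create this secret on its own; the template added below generates it from the files referenced under secrets. If you would rather create it once by hand instead of templating it, an equivalent one-off:
# Manual alternative to the tls-secret template below
kubectl create secret tls prometheus-alerts-tls \
  --cert=tls/k8s.local.crt --key=tls/k8s.local.key -n prometheus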
Add a new templates/tls-secret.yaml file:
{{ range .Values.secrets }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ .name }}
data:
  tls.crt: {{ $.Files.Get .cert | b64enc }}
  tls.key: {{ $.Files.Get .key | b64enc }}
type: kubernetes.io/tls
---
{{ end }}
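You can render just this template to verify that Helm finds the certificate files and base64-encodes them as expected:
# --show-only (-s) renders a single template without installing anything
helm template prometheus ./ -n prometheus -s templates/tls-secret.yaml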
helm upgrade prometheus ./ -n prometheus
Check:
kubectl get pods,svc,ingress -n prometheus
Web access:
https://prometheus.k8s.local/
https://alertmanager.k8s.local/
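Since the certificate is signed by our own CA, validate against it explicitly rather than passing -k:
# Use the CA generated earlier to verify the full TLS chain
curl --cacert tls/ca.crt https://prometheus.k8s.local/
curl --cacert tls/ca.crt https://alertmanager.k8s.local/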
Uninstall:
helm uninstall prometheus -n prometheus
# Force-clean anything stuck: delete leftover pods, clear namespace finalizers, remove the namespace
kubectl delete pod -n prometheus `kubectl get pod -n prometheus |awk 'NR>1{print $1}'` --force
kubectl patch ns prometheus -p '{"metadata":{"finalizers":null}}'
kubectl delete ns prometheus --force
Chart address: https://artifacthub.io/packages/helm/grafana/grafana
helm repo add grafana https://grafana.github.io/helm-charts
helm repo update grafana
helm search repo grafana/grafana
helm pull grafana/grafana
tar -xf grafana-6.38.3.tgz
grep -A3 'image:' grafana/values.yaml
search → pull → tag → push
### 1. grafana
docker search grafana
docker pull grafana/grafana:9.1.5
docker tag grafana/grafana:latest myharbor.com/monitoring/grafana:9.1.5
docker push myharbor.com/monitoring/grafana:9.1.5
### 2. bats
docker search bats
docker pull bats/bats:v1.4.1
docker tag bats/bats:v1.4.1 myharbor.com/monitoring/bats:v1.4.1
docker push myharbor.com/monitoring/bats:v1.4.1
### 3. busybox
docker search busybox
docker pull busybox:1.31.1
docker tag busybox:1.31.1 myharbor.com/monitoring/busybox:1.31.1
docker push myharbor.com/monitoring/busybox:1.31.1
### 4. k8s-sidecar
docker search k8s-sidecar
docker pull quay.io/kiwigrid/k8s-sidecar:1.19.2
docker tag quay.io/kiwigrid/k8s-sidecar:1.19.2 myharbor.com/monitoring/k8s-sidecar:1.19.2
docker push myharbor.com/monitoring/k8s-sidecar:1.19.2
### 5. grafana-image-renderer
docker search grafana-image-renderer
docker pull grafana/grafana-image-renderer:latest
docker tag grafana/grafana-image-renderer:latest myharbor.com/monitoring/grafana-image-renderer:latest
docker push myharbor.com/monitoring/grafana-image-renderer:latest
Modify the image references in values.yaml to point at the private registry.
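A sketch of the resulting values.yaml fragments, assuming the standard keys of the grafana chart (verify against your chart version):
image:
  repository: myharbor.com/monitoring/grafana
  tag: 9.1.5
sidecar:
  image:
    repository: myharbor.com/monitoring/k8s-sidecar
    tag: 1.19.2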
helm install grafana ./ \
-n grafana \
--create-namespace \
--set ingress.enabled=true \
--set ingress.hosts='{grafana.k8s.local}' \
--set ingress.paths='{/}' \
--set ingress.pathType=Prefix
NOTES
NAME: grafana
LAST DEPLOYED: Sat Sep 17 11:41:14 2022
NAMESPACE: grafana
STATUS: deployed
REVISION: 1
NOTES:
1. Get your 'admin' user password by running:
kubectl get secret --namespace grafana grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
2. The Grafana server can be accessed via port 80 on the following DNS name from within your cluster:
grafana.grafana.svc.cluster.local
If you bind grafana to 80, please update values in values.yaml and reinstall:
securityContext:
runAsUser: 0
runAsGroup: 0
fsGroup: 0
command:
- "setcap"
- "'cap_net_bind_service=+ep'"
- "/usr/sbin/grafana-server &&"
- "sh"
- "/run.sh"
Details refer to https://grafana.com/docs/installation/configuration/#http-port.
Or grafana would always crash.
From outside the cluster, the server URL(s) are:
http://grafana.k8s.local
3. Login with the password from step 1 and the username: admin
#################################################################################
###### WARNING: Persistence is disabled!!! You will lose your data when #####
###### the Grafana pod is terminated. #####
#################################################################################
Check:
kubectl get pods,svc,ingress -n grafana
Web access: http://grafana.k8s.local/
Username: admin. The password (in this deployment: 0D0NfEWWFx9qsBiKR8PuFVxf6PPa9o8YGhZZaNXY) is retrieved with:
kubectl get secret --namespace grafana grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
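If grafana.k8s.local does not resolve yet, port-forwarding is a quick fallback; this assumes the Service created by the release is named grafana:
# Then open http://localhost:3000
kubectl port-forward --namespace grafana svc/grafana 3000:80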
Reuse the certificates generated above; just remember to copy the tls directory into the grafana chart directory.
Edit values.yaml (the secret name the ingress references must match the one declared under secrets):
...
ingress:
  ...
  tls:
    - secretName: grafana-alerts-tls
      hosts:
        - grafana.k8s.local
...
secrets:
  - name: grafana-alerts-tls
    cert: tls/k8s.local.crt
    key: tls/k8s.local.key
Add a new templates/tls-secret.yaml file:
{{ range .Values.secrets }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ .name }}
data:
  tls.crt: {{ $.Files.Get .cert | b64enc }}
  tls.key: {{ $.Files.Get .key | b64enc }}
type: kubernetes.io/tls
---
{{ end }}
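As with Prometheus, you can render just the secret template first to confirm the files are picked up:
helm template grafana ./ -n grafana -s templates/tls-secret.yaml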
helm upgrade grafana ./ -n grafana
Check:
kubectl get pods,svc,ingress -n grafana
Web access: https://grafana.k8s.local/
Username: admin. The password (in this deployment: 0D0NfEWWFx9qsBiKR8PuFVxf6PPa9o8YGhZZaNXY) is retrieved with:
kubectl get secret --namespace grafana grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
Uninstall:
helm uninstall grafana -n grafana
# Force-clean anything stuck, as with the prometheus namespace above
kubectl delete pod -n grafana `kubectl get pod -n grafana|awk 'NR>1{print $1}'` --force
kubectl patch ns grafana -p '{"metadata":{"finalizers":null}}'
kubectl delete ns grafana --force
That wraps up deploying Prometheus on Kubernetes. The next article will cover how to use Prometheus + Grafana to monitor Kubernetes resources in practice. If you have any questions, feel free to leave a comment!