• Setting up Pinpoint distributed tracing on k8s


    Deploying the Pinpoint ZooKeeper ensemble on k8s

    1. Create the working directory and a dedicated namespace

    mkdir -p /data/yaml/pinpoint && cd /data/yaml/pinpoint
    kubectl create ns pinpoint
    

    2. Deploy the ZooKeeper ensemble (zoo1, zoo2, zoo3)

    mkdir -p /data/yaml/pinpoint/zookeepers && cd /data/yaml/pinpoint/zookeepers
    
    
    vim zoo1.yaml
    apiVersion: v1
    kind: Service
    metadata:
        namespace: pinpoint
        labels:
          app: zoo1
        name: zoo1
    spec:
       ports:
       - name: httpa
         targetPort: 2181
         port: 2181
       - name: httpb
         targetPort: 2888
         port: 2888
       - name: httpc
         targetPort: 3888
         port: 3888
       selector:
         app: zoo1
    ---
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      namespace: pinpoint
      name: zoo1
      labels:
         app: zoo1
    spec:
      replicas: 1
      minReadySeconds: 120
      strategy:
        type: RollingUpdate
        rollingUpdate:
           maxSurge: 1
           maxUnavailable: 0
      selector:
        matchLabels:
          app: zoo1
      template:
        metadata:
           labels:
              app: zoo1
        spec:
           terminationGracePeriodSeconds: 60
           hostname: zoo1
           containers:
           - name: zoo1
             image: harbor.junengcloud.com/pinpoint/zookeeper:3.4
             imagePullPolicy: IfNotPresent
             resources:
               requests:
                 cpu: 100m
                 memory: 204Mi
               limits:
                 cpu: 2000m
                 memory: 2048Mi
             ports:
             - containerPort: 2181
               name: httpa
             - containerPort: 2888
               name: httpb
             - containerPort: 3888
               name: httpc
             livenessProbe:
               tcpSocket:
                  port: 2181
               initialDelaySeconds: 60
               periodSeconds: 180
             env:
             - name: ZOO_MY_ID
               value: "1"
             - name: ZOO_SERVERS
               value: "server.1=0.0.0.0:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888"                              
    
    [root@master zookeepers]# vim zoo2.yaml 
    apiVersion: v1
    kind: Service
    metadata:
        namespace: pinpoint
        labels:
          app: zoo2
        name: zoo2
    spec:
       ports:
       - name: httpa
         targetPort: 2181
         port: 2181
       - name: httpb
         targetPort: 2888
         port: 2888
       - name: httpc
         targetPort: 3888
         port: 3888
       selector:
         app: zoo2
    ---
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      namespace: pinpoint
      name: zoo2
      labels:
         app: zoo2
    spec:
      replicas: 1
      minReadySeconds: 120
      strategy:
        type: RollingUpdate
        rollingUpdate:
           maxSurge: 1
           maxUnavailable: 0
      selector:
        matchLabels:
          app: zoo2
      template:
        metadata:
           labels:
              app: zoo2
        spec:
           terminationGracePeriodSeconds: 60
           hostname: zoo2
           containers:
           - name: zoo2
             image: harbor.junengcloud.com/pinpoint/zookeeper:3.4
             imagePullPolicy: IfNotPresent
             resources:
               requests:
                 cpu: 100m
                 memory: 204Mi
               limits:
                 cpu: 2000m
                 memory: 2048Mi
             ports:
             - containerPort: 2181
               name: httpa
             - containerPort: 2888
               name: httpb
             - containerPort: 3888
               name: httpc
             livenessProbe:
               tcpSocket:
                  port: 2181
               initialDelaySeconds: 60
               periodSeconds: 180
             env:
             - name: ZOO_MY_ID
               value: "2"
             - name: ZOO_SERVERS
               value: "server.1=zoo1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zoo3:2888:3888"
    
    vim zoo3.yaml
    apiVersion: v1
    kind: Service
    metadata:
        namespace: pinpoint
        labels:
          app: zoo3
        name: zoo3
    spec:
       ports:
       - name: httpa
         targetPort: 2181
         port: 2181
       - name: httpb
         targetPort: 2888
         port: 2888
       - name: httpc
         targetPort: 3888
         port: 3888
       selector:
         app: zoo3
    ---
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      namespace: pinpoint
      name: zoo3
      labels:
         app: zoo3
    spec:
      replicas: 1
      minReadySeconds: 120
      strategy:
        type: RollingUpdate
        rollingUpdate:
           maxSurge: 1
           maxUnavailable: 0
      selector:
        matchLabels:
          app: zoo3
      template:
        metadata:
           labels:
             app: zoo3
        spec:
           terminationGracePeriodSeconds: 60
           hostname: zoo3
           containers:
           - name: zoo3
             image: harbor.junengcloud.com/pinpoint/zookeeper:3.4
             imagePullPolicy: IfNotPresent
             resources:
               requests:
                 cpu: 100m
                 memory: 204Mi
               limits:
                 cpu: 2000m
                 memory: 2048Mi
             ports:
             - containerPort: 2181
               name: httpa
             - containerPort: 2888
               name: httpb
             - containerPort: 3888
               name: httpc
             livenessProbe:
               tcpSocket:
                  port: 2181
               initialDelaySeconds: 60
               periodSeconds: 180
             env:
             - name: ZOO_MY_ID
               value: "3"
             - name: ZOO_SERVERS
               value: "server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=0.0.0.0:2888:3888"
    
    kubectl apply -f  zoo1.yaml
    kubectl apply -f  zoo2.yaml
    kubectl apply -f  zoo3.yaml
    
    [root@master zookeepers]# kubectl get pods -n pinpoint
    NAME                    READY   STATUS    RESTARTS   AGE
    zoo1-749d8cc498-bbx4g   1/1     Running   0          3h33m
    zoo2-695c9f8755-swnqb   1/1     Running   0          3h28m
    zoo3-6bb94d5568-45tft   1/1     Running   0          3h28m
    
    [root@master zookeepers]# kubectl get svc -n pinpoint
    NAME   TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
    zoo1   ClusterIP   10.102.68.14     <none>        2181/TCP,2888/TCP,3888/TCP   178m
    zoo2   ClusterIP   10.108.226.102   <none>        2181/TCP,2888/TCP,3888/TCP   3h29m
    zoo3   ClusterIP   10.101.11.240    <none>        2181/TCP,2888/TCP,3888/TCP   3h29m
    [root@master zookeepers]# 
    
    [root@master zookeepers]# kubectl -n pinpoint exec -it zoo1-749d8cc498-bbx4g   -- zkServer.sh status
    ZooKeeper JMX enabled by default
    Using config: /conf/zoo.cfg
    Mode: follower
    You have new mail in /var/spool/mail/root
    [root@master zookeepers]# 
    
    
    [root@master zookeepers]# kubectl -n pinpoint exec -it zoo2-695c9f8755-swnqb    -- zkServer.sh status
    ZooKeeper JMX enabled by default
    Using config: /conf/zoo.cfg
    Mode: follower
    
    
    [root@master zookeepers]# kubectl -n pinpoint exec -it zoo3-6bb94d5568-45tft   -- zkServer.sh status
    ZooKeeper JMX enabled by default
    Using config: /conf/zoo.cfg
    Mode: leader
    You have new mail in /var/spool/mail/root
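
    Besides checking each node's role, you can optionally verify ensemble connectivity from inside a pod with zkCli.sh (a minimal sketch; the pod name is the one shown above and will differ in your cluster):

    kubectl -n pinpoint exec -it zoo1-749d8cc498-bbx4g -- zkCli.sh -server zoo2:2181 ls /
    # output such as "[zookeeper]" confirms that zoo1 can reach zoo2 through its Service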
    
    

    Deploying the Flink JobManager and TaskManager on k8s

    Configure the JobManager

    [root@master zookeepers]# mkdir -p /data/yaml/pinpoint/jobmanager
    [root@master zookeepers]# cd /data/yaml/pinpoint/jobmanager/
    
    [root@master jobmanager]# vim deployment.yaml 
    kind: Deployment
    apiVersion: apps/v1
    metadata:
        namespace: pinpoint
        name: jobmanager
        labels:
           app: jobmanager
    spec:
       replicas: 1
       minReadySeconds: 60
       strategy:
         type: RollingUpdate
         rollingUpdate:
           maxSurge: 1
           maxUnavailable: 0
       selector:
          matchLabels:
            app: jobmanager
       template:
         metadata:
            labels:
               app: jobmanager
         spec:
            imagePullSecrets:
            - name: harbor
            terminationGracePeriodSeconds: 60
            hostname: jobmanager
            containers:
            - name: jobmanager
              image: harbor.junengcloud.com/pinpoint/flink:1.3.1
              args:
              - jobmanager
              resources:
                requests:
                    cpu: 100m
                    memory: 204Mi
                limits:
                    cpu: 2000m
                    memory: 2048Mi
              env:
              - name: JOB_MANAGER_RPC_ADDRESS
                value: jobmanager
              ports:
              - containerPort: 8081
                name: httpa
              - containerPort: 6123
                name: httpb
              livenessProbe:
                tcpSocket:
                   port: 8081
                initialDelaySeconds: 60
                periodSeconds: 180
    
    [root@master jobmanager]# vim svc.yaml 
    kind: Service
    apiVersion: v1
    metadata:
          namespace: pinpoint
          name: jobmanager
          labels:
             app: jobmanager
    spec:
       ports:
       - name: httpa
         port: 8081
         targetPort: 8081
       - name: httpb
         port: 6123
         targetPort: 6123
       selector:
         app: jobmanager
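
    The apply step for the JobManager is not shown here; presumably it mirrors the other components:

    kubectl apply -f deployment.yaml
    kubectl apply -f svc.yaml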
    

    Configure the TaskManager

    [root@master jobmanager]# mkdir -p /data/yaml/pinpoint/taskmanager
    [root@master jobmanager]# cd /data/yaml/pinpoint/taskmanager/
    
    [root@master taskmanager]# vim deployment.yaml 
    
    apiVersion: apps/v1
    kind: Deployment
    metadata:
          namespace: pinpoint
          name: taskmanager
          labels:
             app: taskmanager
    spec:
         replicas: 1
         minReadySeconds: 120
         strategy:
            type: RollingUpdate
            rollingUpdate:
               maxSurge: 1
               maxUnavailable: 0
         selector:
             matchLabels:
                 app: taskmanager
         template:
             metadata:
               labels:
                  app: taskmanager
             spec:
               imagePullSecrets:
               - name: harbor
               terminationGracePeriodSeconds: 60
               hostname: taskmanager
               containers:
               - name: taskmanager
                 image: harbor.junengcloud.com/pinpoint/flink:1.3.1
                 args:
                 - taskmanager
                 resources:
                   requests:
                      cpu: 100m
                      memory: 204Mi
                   limits:
                      cpu: 2000m
                      memory: 2048Mi
                 env:
                 - name: JOB_MANAGER_RPC_ADDRESS
                   value: jobmanager
                 ports:
                    - containerPort: 6121
                      name: httpa
                    - containerPort: 6122
                      name: httpb
                    - containerPort: 19994
                      name: httpc
    
    [root@master taskmanager]# vim svc.yaml 
    
    kind: Service
    apiVersion: v1
    metadata:
        namespace: pinpoint
        labels:
            app: taskmanager
        name: taskmanager
    spec:
       ports:
       - name: httpa
         port: 6121
         targetPort: 6121
       - name: httpb
         port: 6122
         targetPort: 6122
       - name: httpc
         port: 19994
         targetPort: 19994
       selector:
         app: taskmanager
    
    [root@master taskmanager]# kubectl apply -f deployment.yaml 
    deployment.apps/taskmanager created
    [root@master taskmanager]# kubectl apply -f svc.yaml 
    service/taskmanager created
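
    To confirm that the TaskManager registered with the JobManager, one option is the Flink web UI exposed on port 8081 of the jobmanager Service, reached for example through a temporary port-forward (a sketch, not part of the original steps):

    kubectl -n pinpoint port-forward svc/jobmanager 8081:8081
    # then open http://127.0.0.1:8081 and check that the number of available task slots is non-zero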
    

    Configure HBase (with NFS-backed storage)

    [root@master taskmanager]# mkdir -p /data/yaml/pinpoint/pinpoint-hbase
    [root@master taskmanager]# cd /data/yaml/pinpoint/pinpoint-hbase/
    
    master:
    yum install nfs-utils -y
    mkdir /data/volumes -pv
    vim /etc/exports
    /data/volumes 192.168.10.0/24(rw,no_root_squash)
    
    exportfs -arv
    service nfs start
    systemctl enable nfs
    systemctl status nfs
    
    vim nfs.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: test-nfs-volume
    spec:
      containers:
      - name: test-nfs
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
          - containerPort: 80
            protocol: TCP
        volumeMounts:
        - name: nfs-volumes
          mountPath: /usr/share/nginx/html
      volumes:
      - name: nfs-volumes
        nfs:
           path: /data/volumes
           server: 192.168.10.140
    
    kubectl apply -f nfs.yaml
    
    node1/node2
    docker load -i nfs-subdir-external-provisioner.tar.gz
    
    master:
    vim serviceaccount.yaml
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: nfs-provisioner
      
      
    kubectl create clusterrolebinding nfs-provisioner --clusterrole=cluster-admin --serviceaccount=default:nfs-provisioner
    
    
    Install the nfs-provisioner
    mkdir /data/nfs_pro -p
    mkdir /data/volume_test/v1
    
    vim /etc/exports
    /data/volumes 192.168.10.0/24(rw,no_root_squash)
    /data/nfs_pro 192.168.10.0/24(rw,no_root_squash)
    /data/volume_test/v1 192.168.10.0/24(rw,no_root_squash)             
    
    exportfs -arv
    systemctl restart nfs
    
    
    vim nfs-deployment.yaml
    kind: Deployment
    apiVersion: apps/v1
    metadata:
       name: nfs-provisioner
    spec:
       selector:
          matchLabels:
                app: nfs-provisioner
       replicas: 1
       strategy:
          type: Recreate
       template:
          metadata:
             labels:
                app: nfs-provisioner
          spec:
             serviceAccount: nfs-provisioner
             containers:
             - name: nfs-provisioner
               image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0
               imagePullPolicy: IfNotPresent
               volumeMounts:
               - name: nfs-client-root
                 mountPath: /persistentvolumes
               env:
               - name: PROVISIONER_NAME
                 value: example.com/nfs
               - name: NFS_SERVER
                 value: 192.168.10.140
               - name: NFS_PATH
                 value: /data/nfs_pro
             volumes:
             - name: nfs-client-root
               nfs:
                 server: 192.168.10.140
                 path: /data/nfs_pro
                 
    kubectl apply -f nfs-deployment.yaml
    
    Create a StorageClass to provision PVs dynamically
    vim nfs-storageclass.yaml
    
    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
       name: local-storage
    provisioner: example.com/nfs
    
    
    Create a PVC; the StorageClass will dynamically provision a matching PV
    vim  claim.yaml
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
        name: test-claim1
    spec:
       accessModes: ["ReadWriteMany"]
       resources:
          requests:
              storage: 1Gi
       storageClassName: local-storage
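
    The apply and verification steps for the StorageClass and the test PVC are not shown; a minimal sketch would be:

    kubectl apply -f nfs-storageclass.yaml
    kubectl apply -f claim.yaml
    kubectl get storageclass
    kubectl get pvc test-claim1
    # test-claim1 should reach STATUS "Bound", with a PV provisioned automatically under /data/nfs_pro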
    
    
    node1/node2
    yum install nfs-utils -y
    service nfs start
    systemctl enable nfs
    
    node1:
    mkdir /data1
    mount -t nfs 192.168.10.140:/data/volumes /data1
    
    node2:
    mkdir /data2
    mount -t nfs 192.168.10.140:/data/volumes /data2
    
    [root@master pinpoint-hbase]# vim pvc.yaml 
    
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
       namespace: pinpoint
       name: pinpoint-zookeeper
    spec:
       accessModes:
       - ReadWriteOnce
       resources:
         requests:
            storage: 10Gi
       storageClassName:  local-storage
    ---
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
        namespace: pinpoint
        name: pinpoint-hbase
    spec:
        accessModes:
        - ReadWriteOnce
        resources:
           requests:
              storage: 10Gi
        storageClassName: local-storage
    
    [root@master pinpoint-hbase]# vim pv.yaml 
    
    kind: PersistentVolume
    apiVersion: v1
    metadata:
      namespace: pinpoint
      name: pinpoint-hbase
    spec:
      capacity:
         storage: 10Gi
      volumeMode: Filesystem
      accessModes:
      - ReadWriteOnce
      persistentVolumeReclaimPolicy: Delete
      storageClassName: local-storage
      local:
        path: /data/yaml/pinpoint/pinpoint-hbase
      nodeAffinity:
        required:
           nodeSelectorTerms:
           - matchExpressions:
             - key: kubernetes.io/hostname
               operator: In
               values:
                  - worker1   # replace with the hostname of the node that holds the local volume
    ---
    kind: PersistentVolume
    apiVersion: v1
    metadata:
       namespace: pinpoint
       name: pinpoint-zookeeper
    spec:
       capacity:
          storage: 10Gi
       volumeMode: Filesystem
       accessModes:
       - ReadWriteOnce
       persistentVolumeReclaimPolicy: Delete
       storageClassName: local-storage
       # like the pinpoint-hbase PV above, this PV still needs a local.path and a nodeAffinity
       # section pointing at a directory on the target node
    
    [root@master pinpoint-hbase]# vim sts.yaml 
    
    kind: StatefulSet
    apiVersion: apps/v1
    metadata:
       name: pinpoint-hbase
       namespace: pinpoint
    spec:
       selector:
         matchLabels:
              app: pinpoint-hbase
       serviceName: pinpoint-hbase
       replicas: 1
       updateStrategy:
          type: RollingUpdate
       template:
         metadata:
           labels:
              app: pinpoint-hbase
         spec:
           nodeName: worker1
           terminationGracePeriodSeconds: 60
           containers:
           - name: pinpoint-hbase
             imagePullPolicy: IfNotPresent
             image: docker.io/pinpointdocker/pinpoint-hbase:latest
             resources:
                requests:
                   cpu: 100m
                   memory: 2048Mi
                limits:
                   cpu: 2000m
                   memory: 7Gi
             ports:
             - containerPort: 60000
               protocol: TCP
               name: httpa
             - containerPort: 16010
               protocol: TCP
               name: httpb
             - containerPort: 60020
               protocol: TCP
               name: httpc
             - containerPort: 16030
               protocol: TCP
               name: httpd
             livenessProbe:
               tcpSocket:
                 port: 60000
               initialDelaySeconds: 180
               periodSeconds: 120
             volumeMounts:
             - mountPath: /etc/localtime
               readOnly: false
               name: time-data
             - mountPath: /home/pinpoint/hbase
               name: hbase
             - mountPath: /home/pinpoint/zookeeper
               name: zookeeper
           volumes:
           - name: time-data
             hostPath:
              path: /usr/share/zoneinfo/Asia/Shanghai
           - name: hbase
             persistentVolumeClaim:
                claimName: pinpoint-hbase
           - name: zookeeper
             persistentVolumeClaim:
                 claimName: pinpoint-zookeeper
    
    [root@master pinpoint-hbase]# vim svc.yaml 
    
    kind: Service
    apiVersion: v1
    metadata:
      namespace: pinpoint
      labels:
         app: pinpoint-hbase
      name: pinpoint-hbase
    spec:
      ports:
      - name: httpa
        port: 60000
        targetPort: 60000
      - name: httpb
        port: 16010
        targetPort: 16010
      - name: httpc
        port: 60020
        targetPort: 60020
      - name: httpd
        port: 16030
        targetPort: 16030
      selector:
        app: pinpoint-hbase
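
    The HBase manifests are applied like the other components. On first start the pinpoint-hbase image is expected to initialize the Pinpoint tables, which can take several minutes, so it is worth watching the pod and its logs (a sketch):

    cd /data/yaml/pinpoint/pinpoint-hbase
    kubectl apply -f pvc.yaml
    kubectl apply -f pv.yaml
    kubectl apply -f sts.yaml
    kubectl apply -f svc.yaml
    kubectl -n pinpoint get pods -w                          # wait for pinpoint-hbase-0 to reach Running
    kubectl -n pinpoint logs pinpoint-hbase-0 | tail -n 20   # table creation is logged during the first boot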
    

    Configure pinpoint-collector

    [root@master ~]# mkdir -p /data/yaml/pinpoint/pinpoint-collector
    [root@master ~]# cd /data/yaml/pinpoint/pinpoint-collector/
    
    [root@master pinpoint-collector]# vim deployment.yaml 
    
    kind: Deployment
    apiVersion: apps/v1
    metadata:
       namespace: pinpoint
       name: pinpoint-collector
       labels:
         app: pinpoint-collector
    spec:
       replicas: 1
       minReadySeconds: 120
       strategy:
           type: RollingUpdate
           rollingUpdate:
              maxSurge: 1
              maxUnavailable: 0
       selector:
         matchLabels:
           app: pinpoint-collector
       template:
         metadata:
           labels:
              app: pinpoint-collector
         spec:
           imagePullSecrets:
           - name: harbor
           terminationGracePeriodSeconds: 60
           hostname: pinpoint-collector
           containers:
           - name:  pinpoint-collector
             image: harbor.junengcloud.com/pinpoint/pinpoint-collector:2.1.0
             resources:
               requests:
                 cpu: 200m
                 memory: 307Mi
               limits:
                 cpu: 3000m
                 memory: 4Gi
             ports:
             - containerPort: 9994
               name: httpa
             - containerPort: 9992
               name: httpb
             - containerPort: 9993
               name: httpc
             - containerPort: 9995
               name: httpd
             - containerPort: 9996
               name: httpe
             - containerPort: 9997
               name: httpf
             - containerPort: 9998
               protocol: UDP
               name: httpg
             - containerPort: 9999
               protocol: UDP
               name: httph
             livenessProbe:
               tcpSocket:
                   port: 9991
               initialDelaySeconds: 60
               periodSeconds: 180
             env:
             - name: CLUSTER_ENABLE
               value: "true"
             - name: FLINK_CLUSTER_ENABLE
               value: "true"
             - name: FLINK_CLUSTER_ZOOKEEPER_ADDRESS
               value: "zoo1"
             - name: PINPOINT_ZOOKEEPER_ADDRESS
               value: "zoo1"
             - name: SPRING_PROFILES_ACTIVE
               value: "release"
             - name: HBASE_CLIENT_HOST
               value: "zoo1"
             - name: HBASE_HOST
               value: "zoo1"
             - name: HBASE_PORT
               value: "2181"
             - name: COLLECTOR_RECEIVER_GRPC_SPAN_WORKER_EXECUTOR_THREAD_SIZE
               value: "256"
    
    vim svc.yaml
    
    apiVersion: v1
    kind: Service
    metadata:
      namespace: pinpoint
      labels:
        app: pinpoint-collector
      name: pinpoint-collector
    spec:
      ports:
      - name: httpa
        port: 9991
        targetPort: 9991
        nodePort: 30091
      - name: httpb
        port: 9992
        targetPort: 9992
        nodePort: 30092
      - name: httpc
        port: 9993
        targetPort: 9993
        nodePort: 30093
      - name: httpd
        port: 9994
        targetPort: 9994
        nodePort: 30094
      - name: httpe
        port: 9995
        targetPort: 9995
        nodePort: 30095
      - name: httpf
        port: 9996
        targetPort: 9996
        nodePort: 30096
      - name: httpg
        port: 9995
        protocol: UDP
        targetPort: 9995
      - name: httph
        port: 9996
        protocol: UDP
        targetPort: 9996
      selector:
        app: pinpoint-collector
      type: NodePort
      
      
    kubectl apply -f deployment.yaml
    kubectl apply -f svc.yaml
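
    A quick check that the collector came up and that its NodePorts were allocated (a sketch):

    kubectl -n pinpoint get pods -l app=pinpoint-collector
    kubectl -n pinpoint get svc pinpoint-collector
    kubectl -n pinpoint logs deploy/pinpoint-collector | tail   # watch for ZooKeeper/HBase connection errors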
    

    Configure pinpoint-web

    mkdir -p /data/yaml/pinpoint/pinpoint-web
    cd /data/yaml/pinpoint/pinpoint-web
    docker pull pinpointdocker/pinpoint-web
    
    vim deployment.yaml
    
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      namespace: pinpoint
      name: pinpoint-web
      labels:
        app: pinpoint-web
    spec:
      replicas: 1
      minReadySeconds: 120
      strategy:
        type: RollingUpdate
        rollingUpdate:
          maxSurge: 1
          maxUnavailable: 0
      selector:
        matchLabels:
          app: pinpoint-web
      template:
        metadata:
          labels:
            app: pinpoint-web
        spec:
          terminationGracePeriodSeconds: 60
          hostname: pinpoint-web
          containers:
          - name: pinpoint-web
            image: docker.io/pinpointdocker/pinpoint-web:latest
            resources:
              requests:
                cpu: 100m
                memory: 204Mi
              limits:
                cpu: 2000m
                memory: 2048Mi
            env:
            - name: ADMIN_PASSWORD
              value: "admin"
            - name: ALARM_MAIL_DEBUG
              value: "false"
            - name: ALARM_MAIL_SENDER_ADDRESS
              value: "762359676@qq.com"
            - name: ALARM_MAIL_SERVER_PASSWORD
              value: "mjh123"
            - name: ALARM_MAIL_SERVER_PORT
              value: "465"
            - name: ALARM_MAIL_SERVER_URL
              value: "192.168.10.140"
            - name: ALARM_MAIL_SERVER_USERNAME
              value: "system"
            - name: ALARM_MAIL_SMTP_AUTH
              value: "false"
            - name: ALARM_MAIL_SMTP_PORT
              value: "25"
            - name: ALARM_MAIL_SMTP_STARTTLS_ENABLE
              value: "false"
            - name: ALARM_MAIL_SMTP_STARTTLS_REQUIRED
              value: "false"
            - name: ALARM_MAIL_TRANSPORT_PROTOCOL
              value: "smtp"
            - name: BATCH_ENABLE
              value: "false"
            - name: BATCH_FLINK_SERVER
              value: "jobmanager"
            - name: BATCH_SERVER_IP
              value: "127.0.0.1"
            - name: CLUSTER_ENABLE
              value: "true"
            - name: CONFIG_SENDUSAGE
              value: "true"
            - name: CONFIG_SHOW_APPLICATIONSTAT
              value: "true"
            - name: JDBC_DRIVERCLASSNAME
              value: "com.mysql.jdbc.Driver"
            - name: JDBC_PASSWORD
              value: "klvchen"
            - name: JDBC_URL
              value: "jdbc:mysql://192.168.10.140:3307/pinpoint?characterEncoding=UTF-8"
            - name: JDBC_USERNAME
              value: "root"
            - name: LOGGING_LEVEL_ROOT
              value: "INFO"
            - name: PINPOINT_ZOOKEEPER_ADDRESS
              value: "zoo1"
            - name: SERVER_PORT
              value: "8079"
            - name: SPRING_PROFILES_ACTIVE
              value: "release,batch"
            ports:
            - containerPort: 9997
              name: http
            - containerPort: 8079
              name: web
            livenessProbe:
              tcpSocket:
                port: 8079
              initialDelaySeconds: 60
              periodSeconds: 180
    
    vim svc.yaml
    
    apiVersion: v1
    kind: Service
    metadata:
      namespace: pinpoint
      labels:
        app: pinpoint-web
      name: pinpoint-web
    spec:
      type: NodePort
      ports:
      - name: http
        port: 9997
        targetPort: 9997
        nodePort: 30097
      - name: web
        port: 8079
        targetPort: 8079
        nodePort: 30079
      selector:
        app: pinpoint-web
    
    
    kubectl apply -f deployment.yaml 
    kubectl apply -f svc.yaml
    
    192.168.10.140:30079
    

    (Screenshot of the Pinpoint web UI omitted; the original image link is broken.)

    Build a test image and run a test

    Build the test image

    On the server that needs the agent:
    1. Download the agent
    mkdir -p /data/demo
    cd /data/demo
    wget https://github.com/pinpoint-apm/pinpoint/releases/download/v2.1.0/pinpoint-agent-2.1.0.tar.gz
    
    tar zxvf pinpoint-agent-2.1.0.tar.gz
    cd /data/demo/pinpoint-agent/profiles/release
    cp pinpoint.config  pinpoint.config.ori
    
    2. vim pinpoint.config
    Point profiler.transport.grpc.collector.ip at the Pinpoint collector address:
    
    profiler.transport.grpc.collector.ip=192.168.10.140
    
    3. cd /data/demo
    Place the application jar here; it starts a small service whose /hello endpoint returns "hello spring".
    
    vim Dockerfile
    
    FROM openjdk:8u302-slim
    RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai  /etc/localtime
    # demo-0.0.1-SNAPSHOT.jar is the application jar you want to trace
    ADD demo-0.0.1-SNAPSHOT.jar demo.jar
    ADD pinpoint-agent-2.1.0 /pinpoint-agent/
    COPY docker-entrypoint.sh /
    CMD [ "/bin/bash", "/docker-entrypoint.sh" ]
    
    # agentId supports at most 24 characters and must be unique per instance; with multiple K8s replicas, applicationName must be identical across them
    
    vim docker-entrypoint.sh
    
    #!/bin/bash
    java -javaagent:/pinpoint-agent/pinpoint-bootstrap-2.1.0.jar -Dpinpoint.agentId=${HOSTNAME} -Dpinpoint.applicationName=${appName} ${JVM:--Xmx512m -Xms512m} -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp/ -Djava.security.egd=file:/dev/./urandom -Duser.timezone=GMT+08 -jar /${appName}.jar
    

    Push the image to the registry

    docker build -t harbor.junengcloud.com/tmp/pinpoint-demo:0.0.11 .
    docker push harbor.junengcloud.com/tmp/pinpoint-demo:0.0.11
    

    Test: create the demo workload on the master

    mkdir -p /data/yaml/default/pinpoint-demo
    cd /data/yaml/default/pinpoint-demo
    
    vim deployment.yaml
    
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: demo
    spec:
      replicas: 2
      selector:
        matchLabels:
          app: demo
      template:
        metadata:
          labels:
            app: demo
        spec:
          containers:
          - name: demo
            image: harbor.junengcloud.com/tmp/pinpoint-demo:0.0.11   # the image pushed to your registry
            imagePullPolicy: IfNotPresent
            env:
            - name: JVM
              value: "-Xms1024m -Xmx1024m"
            - name: appName
              valueFrom:
                fieldRef:
                  fieldPath: metadata.labels['app'] 
    
    kubectl apply -f deployment.yaml
    
    vim svc.yaml
    
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: pinpoint-demo
      name: pinpoint-demo
    spec:
      ports:
      - name: http
        port: 8080
        targetPort: 8080
        nodePort: 30080   # 30079 is already taken by the pinpoint-web Service; use a free NodePort
      selector:
        app: pinpoint-demo
      type: NodePort
    
    kubectl apply -f  svc.yaml
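
    Once the demo pods are Running, hitting the /hello endpoint through the NodePort should return "hello spring", and the demo application should appear in the Pinpoint web UI shortly afterwards (assuming the node IP used earlier and the NodePort chosen above):

    curl http://192.168.10.140:30080/hello
    # expected response: hello spring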
    

  • Original article: https://blog.csdn.net/HYMajor/article/details/125596787