kubernetes(5) Continued (4)


    Contents

    volumes

    emptyDir volumes

    hostPath volumes

    NFS volumes

    Persistent volumes

    StorageClass

    StatefulSet controller

    MySQL primary/replica deployment


    volumes

    emptyDir volumes

    [root@k8s2 volumes]# vim emptydir.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: vol1
    spec:
      containers:
      - image: busyboxplus
        name: vm1
        command: ["sleep", "300"]
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      - name: vm2
        image: nginx
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: cache-volume
      volumes:
      - name: cache-volume
        emptyDir:
          medium: Memory
          sizeLimit: 100Mi

    [root@k8s2 volumes]# kubectl apply -f emptydir.yaml
    [root@k8s2 volumes]# kubectl get pod
    [root@k8s2 volumes]# kubectl exec vol1 -c vm1 -it -- sh
    / # cd /cache/
    /cache # ls
    /cache # curl localhost
    /cache # echo www.westos.org > index.html
    /cache # curl localhost
    /cache # dd if=/dev/zero of=bigfile bs=1M count=200
    /cache # du -h bigfile
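
    The bigfile written above exceeds the 100Mi sizeLimit of the emptyDir volume, so the kubelet evicts the pod shortly afterwards (on some versions the write itself fails first with "no space left on device"). A quick way to confirm, as a sketch:

    [root@k8s2 volumes]# kubectl get pod vol1                    # STATUS eventually shows Evicted
    [root@k8s2 volumes]# kubectl describe pod vol1 | tail -n 5   # look for the emptyDir usage-exceeds-limit event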

    hostPath volumes

    [root@k8s2 volumes]# vim hostpath.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: vol2
    spec:
      nodeName: k8s4
      containers:
      - image: nginx
        name: test-container
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: test-volume
      volumes:
      - name: test-volume
        hostPath:
          path: /data
          type: DirectoryOrCreate

    [root@k8s2 volumes]# kubectl apply -f hostpath.yaml
    [root@k8s2 volumes]# kubectl get pod -o wide
    [root@k8s4 data]# echo www.westos.org > index.html
    [root@k8s2 volumes]# curl 10.244.106.140
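
    Because the hostPath type is DirectoryOrCreate, the kubelet creates /data on k8s4 automatically if it does not already exist. A quick check on the node itself, as a sketch:

    [root@k8s4 ~]# ls -ld /data
    [root@k8s4 ~]# cat /data/index.html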

    NFS volumes

    Configure the NFS server

    [root@k8s1 ~]# yum install -y nfs-utils
    [root@k8s1 ~]# vim /etc/exports
    /nfsdata *(rw,sync,no_root_squash)
    [root@k8s1 ~]# mkdir -m 777 /nfsdata
    [root@k8s1 ~]# systemctl enable --now nfs
    [root@k8s1 ~]# showmount -e
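
    Before pointing pods at the export, it can help to mount it manually from a worker node (after installing nfs-utils there) to confirm the export is reachable; a sketch:

    [root@k8s3 ~]# showmount -e 192.168.81.10
    [root@k8s3 ~]# mount -t nfs 192.168.81.10:/nfsdata /mnt
    [root@k8s3 ~]# umount /mnt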

    [root@k8s2 volumes]# vim nfs.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: nfs
    spec:
      containers:
      - image: nginx
        name: test-container
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: test-volume
      volumes:
      - name: test-volume
        nfs:
          server: 192.168.81.10
          path: /nfsdata

    The nfs-utils package must be installed on all k8s nodes:

    yum install -y nfs-utils
    

    Without it, the kubelet cannot mount the NFS volume and the pod fails with a mount error.

    [root@k8s2 volumes]# kubectl apply -f nfs.yaml
    [root@k8s2 volumes]# kubectl get pod -o wide
    # create a test page on the NFS server
    [root@k8s1 ~]# cd /nfsdata/
    [root@k8s1 nfsdata]# echo www.westos.org > index.html
    [root@k8s2 volumes]# curl 10.244.106.141
    www.westos.org

    Persistent volumes

    Configure the NFS export directories

    [root@k8s1 ~]# cd /nfsdata/
    [root@k8s1 nfsdata]# mkdir pv1 pv2 pv3

    Create static PVs

    [root@k8s2 pv]# vim pv.yaml
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv1
    spec:
      capacity:
        storage: 5Gi
      volumeMode: Filesystem
      accessModes:
      - ReadWriteOnce
      persistentVolumeReclaimPolicy: Recycle
      storageClassName: nfs
      nfs:
        path: /nfsdata/pv1
        server: 192.168.81.10
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv2
    spec:
      capacity:
        storage: 10Gi
      volumeMode: Filesystem
      accessModes:
      - ReadWriteMany
      persistentVolumeReclaimPolicy: Recycle
      storageClassName: nfs
      nfs:
        path: /nfsdata/pv2
        server: 192.168.81.10
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv3
    spec:
      capacity:
        storage: 15Gi
      volumeMode: Filesystem
      accessModes:
      - ReadOnlyMany
      persistentVolumeReclaimPolicy: Recycle
      storageClassName: nfs
      nfs:
        path: /nfsdata/pv3
        server: 192.168.81.10

    [root@k8s2 pv]# kubectl apply -f pv.yaml
    [root@k8s2 pv]# kubectl get pv

    Create PVCs

    [root@k8s2 pv]# vim pvc.yaml
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc1
    spec:
      storageClassName: nfs
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
    ---
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc2
    spec:
      storageClassName: nfs
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 10Gi
    ---
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc3
    spec:
      storageClassName: nfs
      accessModes:
      - ReadOnlyMany
      resources:
        requests:
          storage: 15Gi

    [root@k8s2 pv]# kubectl apply -f pvc.yaml
    [root@k8s2 pv]# kubectl get pvc
    [root@k8s2 pv]# kubectl get pv
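
    Each claim binds to the PV whose storageClassName and access mode match and whose capacity is large enough (pvc1 -> pv1, pvc2 -> pv2, pvc3 -> pv3). To check a single binding without scanning the whole table, jsonpath works; a sketch:

    [root@k8s2 pv]# kubectl get pvc pvc1 -o jsonpath='{.spec.volumeName}{"\n"}'
    [root@k8s2 pv]# kubectl get pv pv1 -o jsonpath='{.status.phase}{"\n"}'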

    Create a pod

    [root@k8s2 pv]# vim pod.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: test-pd
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: vol1
      volumes:
      - name: vol1
        persistentVolumeClaim:
          claimName: pvc1

    Create a test page in the NFS export directory

    [root@k8s2 pv]# kubectl apply -f pod.yaml
    [root@k8s1 pv1]# echo pv1 > index.html
    [root@k8s2 pv]# kubectl get pod -o wide
    [root@k8s2 pv]# curl 10.244.106.144

    Reclaim resources in order: pod -> pvc -> pv

    [root@k8s2 pv]# kubectl delete pod test-pd
    [root@k8s2 pv]# kubectl delete -f pvc.yaml

    After the PVC is deleted, the PV is recycled and becomes available for reuse:

    [root@k8s2 pv]# kubectl get pv
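
    With persistentVolumeReclaimPolicy: Recycle, deleting the PVC triggers a recycler pod that scrubs the export directory, after which the PV returns to the Available state. The transition can be watched; a sketch:

    [root@k8s2 pv]# kubectl get pv -w
    [root@k8s2 pv]# kubectl get pod -A | grep recycler    # the helper pod created while recycling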
    

    Recycling a PV pulls a helper image; import registry.k8s.io/debian-base:v2.0.0 on the worker nodes in advance.

    registry.k8s.io replaces the old k8s.gcr.io registry, but you still need a proxy to reach it.

    # import the image with containerd
    [root@k8s3 ~]# ctr -n=k8s.io image import debian-base.tar
    [root@k8s4 ~]# ctr -n=k8s.io image import debian-base.tar
    # or pull it with docker
    [root@k8s3 ~]# docker pull registry.k8s.io/debian-base:v2.0.0
    [root@k8s2 pv]# kubectl delete -f pv.yaml

    StorageClass

    Upload the image (the deployment below references sig-storage/nfs-subdir-external-provisioner:v4.0.2 from a local registry).

    Create a ServiceAccount and grant it permissions

    [root@k8s2 nfs]# vim rbac.yaml
    apiVersion: v1
    kind: Namespace
    metadata:
      name: nfs-client-provisioner
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: nfs-client-provisioner
      namespace: nfs-client-provisioner
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: nfs-client-provisioner-runner
    rules:
    - apiGroups: [""]
      resources: ["nodes"]
      verbs: ["get", "list", "watch"]
    - apiGroups: [""]
      resources: ["persistentvolumes"]
      verbs: ["get", "list", "watch", "create", "delete"]
    - apiGroups: [""]
      resources: ["persistentvolumeclaims"]
      verbs: ["get", "list", "watch", "update"]
    - apiGroups: ["storage.k8s.io"]
      resources: ["storageclasses"]
      verbs: ["get", "list", "watch"]
    - apiGroups: [""]
      resources: ["events"]
      verbs: ["create", "update", "patch"]
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: run-nfs-client-provisioner
    subjects:
    - kind: ServiceAccount
      name: nfs-client-provisioner
      namespace: nfs-client-provisioner
    roleRef:
      kind: ClusterRole
      name: nfs-client-provisioner-runner
      apiGroup: rbac.authorization.k8s.io
    ---
    kind: Role
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: leader-locking-nfs-client-provisioner
      namespace: nfs-client-provisioner
    rules:
    - apiGroups: [""]
      resources: ["endpoints"]
      verbs: ["get", "list", "watch", "create", "update", "patch"]
    ---
    kind: RoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: leader-locking-nfs-client-provisioner
      namespace: nfs-client-provisioner
    subjects:
    - kind: ServiceAccount
      name: nfs-client-provisioner
      namespace: nfs-client-provisioner
    roleRef:
      kind: Role
      name: leader-locking-nfs-client-provisioner
      apiGroup: rbac.authorization.k8s.io
    [root@k8s2 nfs]# kubectl apply -f rbac.yaml

    Deploy the provisioner

    [root@k8s2 nfs]# vim deployment.yaml
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nfs-client-provisioner
      labels:
        app: nfs-client-provisioner
      namespace: nfs-client-provisioner
    spec:
      replicas: 1
      strategy:
        type: Recreate
      selector:
        matchLabels:
          app: nfs-client-provisioner
      template:
        metadata:
          labels:
            app: nfs-client-provisioner
        spec:
          serviceAccountName: nfs-client-provisioner
          containers:
          - name: nfs-client-provisioner
            image: sig-storage/nfs-subdir-external-provisioner:v4.0.2
            volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
            env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.81.10
            - name: NFS_PATH
              value: /nfsdata
          volumes:
          - name: nfs-client-root
            nfs:
              server: 192.168.81.10
              path: /nfsdata
    [root@k8s2 nfs]# kubectl apply -f deployment.yaml
    [root@k8s2 nfs]# kubectl -n nfs-client-provisioner get pod
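
    If the provisioner pod does not reach Running, its logs are the first place to look; a sketch:

    [root@k8s2 nfs]# kubectl -n nfs-client-provisioner logs deploy/nfs-client-provisioner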

    Create the storage class

    [root@k8s2 nfs]# vim class.yaml
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: nfs-client
    provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
    parameters:
      archiveOnDelete: "false"
    [root@k8s2 nfs]# kubectl apply -f class.yaml
    [root@k8s2 nfs]# kubectl get sc

    Create a PVC

    [root@k8s2 nfs]# vim pvc.yaml
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test-claim
    spec:
      storageClassName: nfs-client
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 1Gi
    [root@k8s2 nfs]# kubectl apply -f pvc.yaml
    [root@k8s2 nfs]# kubectl get pvc
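
    Once the claim is Bound, the provisioner creates a subdirectory under /nfsdata for the dynamically provisioned PV (named from the namespace, PVC name, and generated PV name). On the NFS server, as a sketch:

    [root@k8s1 ~]# ls /nfsdata/ | grep test-claim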

     

    Create a pod

    [root@k8s2 nfs]# vim pod.yaml
    kind: Pod
    apiVersion: v1
    metadata:
      name: test-pod
    spec:
      containers:
      - name: test-pod
        image: busybox
        command:
        - "/bin/sh"
        args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"
        volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
      restartPolicy: "Never"
      volumes:
      - name: nfs-pvc
        persistentVolumeClaim:
          claimName: test-claim
    [root@k8s2 nfs]# kubectl apply -f pod.yaml

    The pod creates a file named SUCCESS in the PV.
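
    To confirm, list the provisioned directory on the NFS server (a sketch; the directory follows the namespace-claim-pv naming pattern mentioned above):

    [root@k8s1 ~]# ls /nfsdata/default-test-claim-*/    # should list the SUCCESS file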

    Set a default storage class so that storageClassName can be omitted when creating a PVC:

    [root@k8s2 pvc]# kubectl patch storageclass nfs-client -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
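
    After the patch, a claim that omits storageClassName is provisioned by nfs-client automatically. A minimal sketch (the name default-claim is only an example):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: default-claim
    spec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 1Gi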
    

    StatefulSet controller

    [root@k8s2 statefulset]# vim headless.yaml
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-svc
      labels:
        app: nginx
    spec:
      ports:
      - port: 80
        name: web
      clusterIP: None
      selector:
        app: nginx
    [root@k8s2 statefulset]# kubectl apply -f headless.yaml
    [root@k8s2 statefulset]# kubectl get svc

    [root@k8s2 statefulset]# vim statefulset.yaml
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: web
    spec:
      serviceName: "nginx-svc"
      replicas: 3
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: nginx
            volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
      volumeClaimTemplates:
      - metadata:
          name: www
        spec:
          storageClassName: nfs-client
          accessModes:
          - ReadWriteOnce
          resources:
            requests:
              storage: 1Gi
    [root@k8s2 statefulset]# kubectl apply -f statefulset.yaml
    [root@k8s2 statefulset]# kubectl get pod
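
    volumeClaimTemplates gives every replica its own claim, named <template>-<pod> (www-web-0, www-web-1, www-web-2), each backed by its own directory on the NFS server. To check, as a sketch:

    [root@k8s2 statefulset]# kubectl get pvc
    [root@k8s1 ~]# ls /nfsdata/ | grep www-web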

    Create a test page in each replica's NFS export directory, then access each pod by its stable DNS name from a test pod:
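
    The test pages can be written straight into each replica's directory on the NFS server (a sketch; the directories follow the default-www-web-N-pvc-... naming pattern, and the page content is arbitrary):

    [root@k8s1 ~]# cd /nfsdata
    [root@k8s1 nfsdata]# for d in default-www-web-*; do echo ${d} > ${d}/index.html; done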

    [root@k8s2 statefulset]# kubectl run demo --image busyboxplus -it
    / # curl web-0.nginx-svc
    / # curl web-1.nginx-svc
    / # curl web-2.nginx-svc

    StatefulSet ordered scale-down and cleanup. Deleting the StatefulSet does not remove the PVCs created from volumeClaimTemplates, so they are deleted explicitly afterwards:

    [root@k8s2 statefulset]# kubectl scale statefulsets web --replicas=0
    [root@k8s2 statefulset]# kubectl delete -f statefulset.yaml
    [root@k8s2 statefulset]# kubectl delete pvc --all

    MySQL primary/replica deployment

    Upload the images (mysql:5.7 and xtrabackup:1.0, referenced by the StatefulSet below).

     

    [root@k8s2 mysql]# vim configmap.yaml
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: mysql
      labels:
        app: mysql
        app.kubernetes.io/name: mysql
    data:
      primary.cnf: |
        [mysqld]
        log-bin
      replica.cnf: |
        [mysqld]
        super-read-only
    [root@k8s2 mysql]# kubectl apply -f configmap.yaml

    [root@k8s2 mysql]# vim svc.yaml
    apiVersion: v1
    kind: Service
    metadata:
      name: mysql
      labels:
        app: mysql
        app.kubernetes.io/name: mysql
    spec:
      ports:
      - name: mysql
        port: 3306
      clusterIP: None
      selector:
        app: mysql
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: mysql-read
      labels:
        app: mysql
        app.kubernetes.io/name: mysql
        readonly: "true"
    spec:
      ports:
      - name: mysql
        port: 3306
      selector:
        app: mysql
    [root@k8s2 mysql]# kubectl apply -f svc.yaml
    [root@k8s2 mysql]# kubectl get svc
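
    The headless mysql service gives each pod a stable DNS name (mysql-0.mysql, mysql-1.mysql, ...), used for writes and for replication, while mysql-read load-balances read connections across all pods. A quick DNS check from a throwaway pod, as a sketch:

    [root@k8s2 mysql]# kubectl run dns-test --rm -it --image busyboxplus -- sh
    / # nslookup mysql        # one A record per StatefulSet pod
    / # nslookup mysql-read   # a single ClusterIP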

    [root@k8s2 mysql]# vim statefulset.yaml
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: mysql
    spec:
      selector:
        matchLabels:
          app: mysql
          app.kubernetes.io/name: mysql
      serviceName: mysql
      replicas: 3
      template:
        metadata:
          labels:
            app: mysql
            app.kubernetes.io/name: mysql
        spec:
          initContainers:
          - name: init-mysql
            image: mysql:5.7
            command:
            - bash
            - "-c"
            - |
              set -ex
              # Generate the MySQL server ID from the pod ordinal.
              [[ $HOSTNAME =~ -([0-9]+)$ ]] || exit 1
              ordinal=${BASH_REMATCH[1]}
              echo [mysqld] > /mnt/conf.d/server-id.cnf
              # Add an offset to avoid the reserved value server-id=0.
              echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
              # Copy the appropriate conf.d file from the config map to the emptyDir.
              if [[ $ordinal -eq 0 ]]; then
                cp /mnt/config-map/primary.cnf /mnt/conf.d/
              else
                cp /mnt/config-map/replica.cnf /mnt/conf.d/
              fi
            volumeMounts:
            - name: conf
              mountPath: /mnt/conf.d
            - name: config-map
              mountPath: /mnt/config-map
          - name: clone-mysql
            image: xtrabackup:1.0
            command:
            - bash
            - "-c"
            - |
              set -ex
              # Skip the clone if data already exists.
              [[ -d /var/lib/mysql/mysql ]] && exit 0
              # Skip the clone on the primary (ordinal 0).
              [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
              ordinal=${BASH_REMATCH[1]}
              [[ $ordinal -eq 0 ]] && exit 0
              # Clone data from the previous peer.
              ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
              # Prepare the backup.
              xtrabackup --prepare --target-dir=/var/lib/mysql
            volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
              subPath: mysql
            - name: conf
              mountPath: /etc/mysql/conf.d
          containers:
          - name: mysql
            image: mysql:5.7
            env:
            - name: MYSQL_ALLOW_EMPTY_PASSWORD
              value: "1"
            ports:
            - name: mysql
              containerPort: 3306
            volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
              subPath: mysql
            - name: conf
              mountPath: /etc/mysql/conf.d
            resources:
              requests:
                cpu: 500m
                memory: 512Mi
            livenessProbe:
              exec:
                command: ["mysqladmin", "ping"]
              initialDelaySeconds: 30
              periodSeconds: 10
              timeoutSeconds: 5
            readinessProbe:
              exec:
                # Check whether queries work over TCP (skip-networking is off).
                command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
              initialDelaySeconds: 5
              periodSeconds: 2
              timeoutSeconds: 1
          - name: xtrabackup
            image: xtrabackup:1.0
            ports:
            - name: xtrabackup
              containerPort: 3307
            command:
            - bash
            - "-c"
            - |
              set -ex
              cd /var/lib/mysql
              # Determine the binlog position of the cloned data, if any.
              if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
                # XtraBackup already generated a partial "CHANGE MASTER TO" query
                # because we cloned from an existing replica. (Strip the trailing semicolon!)
                cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
                # Ignore xtrabackup_binlog_info in this case (it's useless).
                rm -f xtrabackup_slave_info xtrabackup_binlog_info
              elif [[ -f xtrabackup_binlog_info ]]; then
                # We cloned directly from the primary. Parse the binlog position.
                [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
                rm -f xtrabackup_binlog_info xtrabackup_slave_info
                echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                      MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
              fi
              # Check if we need to complete a clone by starting replication.
              if [[ -f change_master_to.sql.in ]]; then
                echo "Waiting for mysqld to be ready (accepting connections)"
                until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
                echo "Initializing replication from clone position"
                mysql -h 127.0.0.1 \
                      -e "$(<change_master_to.sql.in), \
                              MASTER_HOST='mysql-0.mysql', \
                              MASTER_USER='root', \
                              MASTER_PASSWORD='', \
                              MASTER_CONNECT_RETRY=10; \
                            START SLAVE;" || exit 1
                # In case of container restart, attempt this at most once.
                mv change_master_to.sql.in change_master_to.sql.orig
              fi
              # When a peer asks, start a server to send backups.
              exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
                "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
            volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
              subPath: mysql
            - name: conf
              mountPath: /etc/mysql/conf.d
            resources:
              requests:
                cpu: 100m
                memory: 100Mi
          volumes:
          - name: conf
            emptyDir: {}
          - name: config-map
            configMap:
              name: mysql
      volumeClaimTemplates:
      - metadata:
          name: data
        spec:
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 10Gi
    [root@k8s2 mysql]# kubectl apply -f statefulset.yaml
    [root@k8s2 mysql]# kubectl get pod

    Connection test

    [root@k8s2 mysql]# kubectl run demo --image mysql:5.7 -it -- bash
    bash-4.2# mysql -h mysql-0.mysql
    mysql> show databases;
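
    To verify replication end to end, write through the primary and read back through mysql-read (a sketch; the test database and table names are arbitrary):

    bash-4.2# mysql -h mysql-0.mysql -e "CREATE DATABASE test; CREATE TABLE test.messages (message VARCHAR(250)); INSERT INTO test.messages VALUES ('hello');"
    bash-4.2# mysql -h mysql-read -e "SELECT * FROM test.messages"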

    Cleanup

    [root@k8s2 mysql]# kubectl delete -f statefulset.yaml
    [root@k8s2 mysql]# kubectl delete pvc --all

  • Original article: https://blog.csdn.net/m0_64028800/article/details/134275693