• Kubernetes cluster orchestration: k8s storage (volumes, persistent volumes, StatefulSet controller)


    volumes

    emptyDir volume

    vim emptydir.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: vol1
    spec:
      containers:
      - image: busyboxplus
        name: vm1
        command: ["sleep", "300"]
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      - name: vm2
        image: nginx
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: cache-volume
      volumes:
      - name: cache-volume
        emptyDir:
          medium: Memory
          sizeLimit: 100Mi
    kubectl apply -f emptydir.yaml
    kubectl get pod


     

    kubectl exec vol1 -c vm1 -it -- sh
    / # cd /cache/
    /cache # curl localhost
    /cache # echo www.westos.org > index.html
    /cache # curl localhost
    /cache # dd if=/dev/zero of=bigfile bs=1M count=200
    /cache # du -h bigfile
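    Since the emptyDir here is memory-backed and capped at sizeLimit: 100Mi, the 200M bigfile exceeds the limit and the kubelet evicts the pod shortly afterwards. A quick way to observe this from outside the pod (the status should become Evicted, with an event mentioning the exceeded emptyDir limit):

    kubectl get pod vol1
    kubectl describe pod vol1 | grep -i evict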

    hostPath volume

    vim hostpath.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: vol2
    spec:
      nodeName: k8s4
      containers:
      - image: nginx
        name: test-container
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: test-volume
      volumes:
      - name: test-volume
        hostPath:
          path: /data
          type: DirectoryOrCreate
    kubectl apply -f hostpath.yaml
    kubectl get pod -o wide

    [root@k8s4 data]# echo www.westos.org > index.html
    curl 10.244.106.152
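    Because a hostPath volume lives on the node's own filesystem (the /data directory was auto-created by type: DirectoryOrCreate), the data outlives the pod. A minimal check, assuming the pod is pinned back onto k8s4 by nodeName:

    kubectl delete -f hostpath.yaml
    kubectl apply -f hostpath.yaml
    kubectl get pod -o wide        # look up the new pod IP, then curl it again
    [root@k8s4 ~]# ls -l /data     # index.html is still there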

    NFS volume

    Configure the NFS server

    [root@k8s1 ~]# yum install -y nfs-utils
    [root@k8s1 ~]# vim /etc/exports
    /nfsdata *(rw,sync,no_root_squash)
    [root@k8s1 ~]# mkdir -m 777 /nfsdata
    [root@k8s1 ~]# systemctl enable --now nfs
    [root@k8s1 ~]# showmount -e
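    Before writing the pod manifest, it is worth verifying from a worker node that the export is actually reachable (this assumes nfs-utils is already installed there):

    [root@k8s2 ~]# showmount -e 192.168.92.11
    [root@k8s2 ~]# mount -t nfs 192.168.92.11:/nfsdata /mnt && umount /mnt    # optional mount smoke test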

    vim nfs.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: nfs
    spec:
      containers:
      - image: nginx
        name: test-container
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: test-volume
      volumes:
      - name: test-volume
        nfs:
          server: 192.168.92.11
          path: /nfsdata

    The nfs-utils package must be installed on every k8s node:

    yum install -y nfs-utils

    If it is missing on a node, the kubelet there cannot mount the NFS volume and pod creation fails with a mount error.

    kubectl apply -f nfs.yaml
    kubectl get pod -o wide

    Create a test page on the NFS server

    [root@k8s1 ~]# cd /nfsdata/
    [root@k8s1 nfsdata]# echo www.westos.org > index.html

    [root@k8s2 volumes]# curl 10.244.106.153

    Persistent volumes

    Configure the NFS export directories

    [root@k8s1 ~]# cd /nfsdata/
    [root@k8s1 nfsdata]# mkdir pv1 pv2 pv3

    Create static PVs

    vim pv.yaml
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv1
    spec:
      capacity:
        storage: 5Gi
      volumeMode: Filesystem
      accessModes:
      - ReadWriteOnce
      persistentVolumeReclaimPolicy: Recycle
      storageClassName: nfs
      nfs:
        path: /nfsdata/pv1
        server: 192.168.92.11
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv2
    spec:
      capacity:
        storage: 10Gi
      volumeMode: Filesystem
      accessModes:
      - ReadWriteMany
      persistentVolumeReclaimPolicy: Delete
      storageClassName: nfs
      nfs:
        path: /nfsdata/pv2
        server: 192.168.92.11
    ---
    apiVersion: v1
    kind: PersistentVolume
    metadata:
      name: pv3
    spec:
      capacity:
        storage: 15Gi
      volumeMode: Filesystem
      accessModes:
      - ReadOnlyMany
      persistentVolumeReclaimPolicy: Retain
      storageClassName: nfs
      nfs:
        path: /nfsdata/pv3
        server: 192.168.92.11
    kubectl apply -f pv.yaml
    kubectl get pv

    Create PVCs

    vim pvc.yaml
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc1
    spec:
      storageClassName: nfs
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
    ---
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc2
    spec:
      storageClassName: nfs
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 10Gi
    ---
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc3
    spec:
      storageClassName: nfs
      accessModes:
      - ReadOnlyMany
      resources:
        requests:
          storage: 15Gi
    kubectl apply -f pvc.yaml
    kubectl get pvc
    kubectl get pv
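    Binding matches each claim to a PV whose capacity and access modes satisfy the request: pvc1 (1Gi, RWO) binds pv1, pvc2 (10Gi, RWX) binds pv2, and pvc3 (15Gi, ROX) binds pv3. A claim that no PV can satisfy simply stays Pending; for example, this hypothetical claim would never bind against the three PVs above:

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: pvc4                # hypothetical, for illustration only
    spec:
      storageClassName: nfs
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 20Gi         # larger than any available PV, so it stays Pending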

    Create pods

    vim pod.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      name: test-pod1
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: vol1
      volumes:
      - name: vol1
        persistentVolumeClaim:
          claimName: pvc1
    ---
    apiVersion: v1
    kind: Pod
    metadata:
      name: test-pod2
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: vol1
      volumes:
      - name: vol1
        persistentVolumeClaim:
          claimName: pvc2
    ---
    apiVersion: v1
    kind: Pod
    metadata:
      name: test-pod3
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: vol1
      volumes:
      - name: vol1
        persistentVolumeClaim:
          claimName: pvc3
    kubectl apply -f pod.yaml
    kubectl get pod -o wide

    Create test pages in the NFS export directories

    echo pv1 > pv1/index.html
    echo pv2 > pv2/index.html
    echo pv3 > pv3/index.html
    [root@k8s2 pv]# curl 10.244.106.154
    [root@k8s2 pv]# curl 10.244.106.155
    [root@k8s2 pv]# curl 10.244.106.156

    Resources must be reclaimed in order: pod -> pvc -> pv

    kubectl delete -f pod.yaml
    kubectl delete -f pvc.yaml

    After the PVCs are deleted, each PV is handled according to its reclaim policy: Recycle scrubs pv1 and returns it to Available for reuse, while Retain leaves pv3 Released with its data intact.

    kubectl get pv

    Recycling a PV requires pulling a helper image that scrubs the volume, so import the image on the worker nodes in advance.

    Import the image with containerd

    [root@k8s3 ~]# ctr -n=k8s.io image import debian-base.tar
    [root@k8s4 ~]# ctr -n=k8s.io image import debian-base.tar
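    You can confirm the import before deleting the PVs (the exact image name inside the tarball may differ, so adjust the grep pattern if needed):

    [root@k8s3 ~]# ctr -n=k8s.io image ls | grep debian-base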

    Clean up the PVs

    kubectl delete -f pv.yaml

    storageclass

    Upstream: GitHub - kubernetes-sigs/nfs-subdir-external-provisioner: Dynamic sub-dir volume provisioner on a remote NFS server.

    Upload the provisioner image

    Create a ServiceAccount and grant it RBAC permissions

    [root@k8s2 storageclass]# vim nfs-client.yaml
    apiVersion: v1
    kind: Namespace
    metadata:
      labels:
        kubernetes.io/metadata.name: nfs-client-provisioner
      name: nfs-client-provisioner
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: nfs-client-provisioner
      namespace: nfs-client-provisioner
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: nfs-client-provisioner-runner
    rules:
    - apiGroups: [""]
      resources: ["nodes"]
      verbs: ["get", "list", "watch"]
    - apiGroups: [""]
      resources: ["persistentvolumes"]
      verbs: ["get", "list", "watch", "create", "delete"]
    - apiGroups: [""]
      resources: ["persistentvolumeclaims"]
      verbs: ["get", "list", "watch", "update"]
    - apiGroups: ["storage.k8s.io"]
      resources: ["storageclasses"]
      verbs: ["get", "list", "watch"]
    - apiGroups: [""]
      resources: ["events"]
      verbs: ["create", "update", "patch"]
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: run-nfs-client-provisioner
    subjects:
    - kind: ServiceAccount
      name: nfs-client-provisioner
      namespace: nfs-client-provisioner
    roleRef:
      kind: ClusterRole
      name: nfs-client-provisioner-runner
      apiGroup: rbac.authorization.k8s.io
    ---
    kind: Role
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: leader-locking-nfs-client-provisioner
      namespace: nfs-client-provisioner
    rules:
    - apiGroups: [""]
      resources: ["endpoints"]
      verbs: ["get", "list", "watch", "create", "update", "patch"]
    ---
    kind: RoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: leader-locking-nfs-client-provisioner
      namespace: nfs-client-provisioner
    subjects:
    - kind: ServiceAccount
      name: nfs-client-provisioner
      namespace: nfs-client-provisioner
    roleRef:
      kind: Role
      name: leader-locking-nfs-client-provisioner
      apiGroup: rbac.authorization.k8s.io
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nfs-client-provisioner
      labels:
        app: nfs-client-provisioner
      namespace: nfs-client-provisioner
    spec:
      replicas: 1
      strategy:
        type: Recreate
      selector:
        matchLabels:
          app: nfs-client-provisioner
      template:
        metadata:
          labels:
            app: nfs-client-provisioner
        spec:
          serviceAccountName: nfs-client-provisioner
          containers:
          - name: nfs-client-provisioner
            image: sig-storage/nfs-subdir-external-provisioner:v4.0.2
            volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
            env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.92.11
            - name: NFS_PATH
              value: /nfsdata
          volumes:
          - name: nfs-client-root
            nfs:
              server: 192.168.92.11
              path: /nfsdata
    ---
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: nfs-client
      annotations:
        storageclass.kubernetes.io/is-default-class: "true"
    provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
    parameters:
      archiveOnDelete: "false"
    kubectl apply -f nfs-client.yaml
    kubectl -n nfs-client-provisioner get pod
    kubectl get sc

    Create a PVC

    vim pvc.yaml
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test-claim
    spec:
      storageClassName: nfs-client
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 1Gi
    kubectl apply -f pvc.yaml
    kubectl get pvc
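    For every bound claim, the provisioner creates a subdirectory under the export named after the namespace, PVC, and PV, which you can see on the NFS server (the UUID suffix will differ):

    [root@k8s1 ~]# ls /nfsdata    # expect a directory like default-test-claim-pvc-<uuid>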

    Create a pod

    vim pod.yaml
    kind: Pod
    apiVersion: v1
    metadata:
      name: test-pod
    spec:
      containers:
      - name: test-pod
        image: busybox
        command:
        - "/bin/sh"
        args:
        - "-c"
        - "touch /mnt/SUCCESS && exit 0 || exit 1"
        volumeMounts:
        - name: nfs-pvc
          mountPath: "/mnt"
      restartPolicy: "Never"
      volumes:
      - name: nfs-pvc
        persistentVolumeClaim:
          claimName: test-claim
    kubectl apply -f pod.yaml

    The pod creates a SUCCESS file in the PV.
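    You can verify it on the NFS server side (again, the UUID in the provisioned directory name will differ):

    [root@k8s1 ~]# ls /nfsdata/default-test-claim-pvc-*/    # should list the SUCCESS file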

    Clean up

    kubectl delete -f pod.yaml
    kubectl delete -f pvc.yaml

    Set the default StorageClass so that PVCs can omit storageClassName. (Here nfs-client.yaml already set the is-default-class annotation, so the patch is a no-op; it shows how to mark an existing class as default.)

    kubectl patch storageclass nfs-client -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
    kubectl get sc
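    With a default class in place, a claim like the following sketch binds to nfs-client even though it names no storageClassName (test-claim-2 is a hypothetical name):

    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: test-claim-2        # hypothetical
    spec:
      accessModes:
      - ReadWriteMany
      resources:
        requests:
          storage: 1Gi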

    StatefulSet controller

    vim headless.yaml
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-svc
      labels:
        app: nginx
    spec:
      ports:
      - port: 80
        name: web
      clusterIP: None
      selector:
        app: nginx
    kubectl apply -f headless.yaml
    kubectl get svc

    vim statefulset.yaml
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: web
    spec:
      serviceName: "nginx-svc"
      replicas: 3
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: nginx
            volumeMounts:
            - name: www
              mountPath: /usr/share/nginx/html
      volumeClaimTemplates:
      - metadata:
          name: www
        spec:
          storageClassName: nfs-client
          accessModes:
          - ReadWriteOnce
          resources:
            requests:
              storage: 1Gi
    kubectl apply -f statefulset.yaml
    kubectl get pod
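    volumeClaimTemplates gives every replica its own claim, named <template>-<statefulset>-<ordinal>; as the pods start in order (web-0, then web-1, then web-2), the matching PVCs should appear:

    kubectl get pvc    # expect www-web-0, www-web-1, www-web-2, all Bound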

    Create test pages in the NFS export directory

    echo web-0 > default-www-web-0-pvc-8661e761-2aa9-4514-9a37-45be34af3196/index.html
    echo web-1 > default-www-web-1-pvc-79b4afc4-c159-409f-8757-35635befa584/index.html
    echo web-2 > default-www-web-2-pvc-ee2ae058-a2d9-4f94-b55c-d69ef2f3c0b6/index.html
    kubectl run demo --image busyboxplus -it
    / # curl web-0.nginx-svc
    / # curl web-1.nginx-svc
    / # curl web-2.nginx-svc
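    Each replica gets a stable DNS record of the form <pod>.<headless-service>, and the headless service name itself resolves to all pod IPs; this can be confirmed from the same demo pod:

    / # nslookup nginx-svc
    / # nslookup web-0.nginx-svc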

    Ordered teardown of the StatefulSet

    kubectl scale statefulsets web --replicas=0
    kubectl delete -f statefulset.yaml
    kubectl delete pvc --all

    MySQL primary/replica deployment

    Reference: https://v1-25.docs.kubernetes.io/zh-cn/docs/tasks/run-application/run-replicated-stateful-application/

    Upload the images

    vim configmap.yaml
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: mysql
      labels:
        app: mysql
        app.kubernetes.io/name: mysql
    data:
      primary.cnf: |
        [mysqld]
        log-bin
      replica.cnf: |
        [mysqld]
        super-read-only
    kubectl apply -f configmap.yaml
    kubectl get cm

    vim svc.yaml
    apiVersion: v1
    kind: Service
    metadata:
      name: mysql
      labels:
        app: mysql
        app.kubernetes.io/name: mysql
    spec:
      ports:
      - name: mysql
        port: 3306
      clusterIP: None
      selector:
        app: mysql
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: mysql-read
      labels:
        app: mysql
        app.kubernetes.io/name: mysql
        readonly: "true"
    spec:
      ports:
      - name: mysql
        port: 3306
      selector:
        app: mysql
    kubectl apply -f svc.yaml
    kubectl get svc

    vim statefulset.yaml
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: mysql
    spec:
      selector:
        matchLabels:
          app: mysql
          app.kubernetes.io/name: mysql
      serviceName: mysql
      replicas: 3
      template:
        metadata:
          labels:
            app: mysql
            app.kubernetes.io/name: mysql
        spec:
          initContainers:
          - name: init-mysql
            image: mysql:5.7
            command:
            - bash
            - "-c"
            - |
              set -ex
              # Generate the MySQL server ID from the pod ordinal.
              [[ $HOSTNAME =~ -([0-9]+)$ ]] || exit 1
              ordinal=${BASH_REMATCH[1]}
              echo [mysqld] > /mnt/conf.d/server-id.cnf
              # Add an offset to avoid the reserved value server-id=0.
              echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
              # Copy the appropriate conf.d file from the config map to the emptyDir.
              if [[ $ordinal -eq 0 ]]; then
                cp /mnt/config-map/primary.cnf /mnt/conf.d/
              else
                cp /mnt/config-map/replica.cnf /mnt/conf.d/
              fi
            volumeMounts:
            - name: conf
              mountPath: /mnt/conf.d
            - name: config-map
              mountPath: /mnt/config-map
          - name: clone-mysql
            image: xtrabackup:1.0
            command:
            - bash
            - "-c"
            - |
              set -ex
              # Skip the clone if data already exists.
              [[ -d /var/lib/mysql/mysql ]] && exit 0
              # Skip the clone on the primary (ordinal 0).
              [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
              ordinal=${BASH_REMATCH[1]}
              [[ $ordinal -eq 0 ]] && exit 0
              # Clone data from the previous peer.
              ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
              # Prepare the backup.
              xtrabackup --prepare --target-dir=/var/lib/mysql
            volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
              subPath: mysql
            - name: conf
              mountPath: /etc/mysql/conf.d
          containers:
          - name: mysql
            image: mysql:5.7
            env:
            - name: MYSQL_ALLOW_EMPTY_PASSWORD
              value: "1"
            ports:
            - name: mysql
              containerPort: 3306
            volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
              subPath: mysql
            - name: conf
              mountPath: /etc/mysql/conf.d
            resources:
              requests:
                cpu: 500m
                memory: 512Mi
            livenessProbe:
              exec:
                command: ["mysqladmin", "ping"]
              initialDelaySeconds: 30
              periodSeconds: 10
              timeoutSeconds: 5
            readinessProbe:
              exec:
                # Check that we can run queries over TCP (skip-networking is off).
                command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
              initialDelaySeconds: 5
              periodSeconds: 2
              timeoutSeconds: 1
          - name: xtrabackup
            image: xtrabackup:1.0
            ports:
            - name: xtrabackup
              containerPort: 3307
            command:
            - bash
            - "-c"
            - |
              set -ex
              cd /var/lib/mysql
              # Determine the binlog position of the cloned data, if any.
              if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
                # XtraBackup already generated a partial "CHANGE MASTER TO" query
                # because we cloned from an existing replica. (Strip the trailing semicolon!)
                cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
                # Ignore xtrabackup_binlog_info in this case (it is useless).
                rm -f xtrabackup_slave_info xtrabackup_binlog_info
              elif [[ -f xtrabackup_binlog_info ]]; then
                # We cloned directly from the primary. Parse the binlog position.
                [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
                rm -f xtrabackup_binlog_info xtrabackup_slave_info
                echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                      MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
              fi
              # Check if we need to complete the clone by starting replication.
              if [[ -f change_master_to.sql.in ]]; then
                echo "Waiting for mysqld to be ready (accepting connections)"
                until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done
                echo "Initializing replication from clone position"
                mysql -h 127.0.0.1 \
                      -e "$(<change_master_to.sql.in), \
                              MASTER_HOST='mysql-0.mysql', \
                              MASTER_USER='root', \
                              MASTER_PASSWORD='', \
                              MASTER_CONNECT_RETRY=10; \
                            START SLAVE;" || exit 1
                # In case of container restart, attempt this at most once.
                mv change_master_to.sql.in change_master_to.sql.orig
              fi
              # Start a server to send backups when requested by peers.
              exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
                "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
            volumeMounts:
            - name: data
              mountPath: /var/lib/mysql
              subPath: mysql
            - name: conf
              mountPath: /etc/mysql/conf.d
            resources:
              requests:
                cpu: 100m
                memory: 100Mi
          volumes:
          - name: conf
            emptyDir: {}
          - name: config-map
            configMap:
              name: mysql
      volumeClaimTemplates:
      - metadata:
          name: data
        spec:
          accessModes: ["ReadWriteOnce"]
          resources:
            requests:
              storage: 10Gi
    kubectl apply -f statefulset.yaml
    kubectl get pod

    Connection test

    kubectl run demo --image mysql:5.7 -it -- bash
    root@demo:/# mysql -h mysql-0.mysql
    mysql> show databases;
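    To exercise replication end to end, the referenced Kubernetes doc writes through the primary and reads through the load-balanced mysql-read service; from the same demo container:

    mysql -h mysql-0.mysql <<EOF
    CREATE DATABASE test;
    CREATE TABLE test.messages (message VARCHAR(250));
    INSERT INTO test.messages VALUES ('hello');
    EOF
    mysql -h mysql-read -e "SELECT * FROM test.messages"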

    Clean up

    kubectl delete -f statefulset.yaml
    kubectl delete pvc --all
  • Original article: https://blog.csdn.net/dgffd/article/details/134275766