• heketi管理glusterfs,k8s基于glusterfs创建storageclass


    1,安装heketi

    heketi 项目介绍:https://github.com/heketi/heketi

    要点概况:

    • 通过 RESTful 接口,管理glusterfs 数据卷(创建,扩容),可以把数据文件分布在多个glusterfs集群之间
    • 通过heketi,可以把glusterfs接入云管理(Kubernetes,OpenStack 等)
    • heketi 版本和 glusterfs版本,不要求一致(可选择各自的稳定版)
    • 安装条件:glusterfs集群3个节点,每个节点需要至少一个空余的磁盘(或分区–未进行文件系统初始化)。需要配置免密登录glusterfs各节点,如果heketi ssh远程账号为非root,需要保证有sudo权限

    参考的文档:https://www.jianshu.com/p/8f36e59ae4f8

    1.1,离线安装

    heketi 二进制包下载:https://github.com/heketi/heketi/releases
    （原文此处为截图：heketi GitHub releases 下载页面，图片未能保留）
    编写systemd启动文件

    [root@c73 heketi]# cat /usr/lib/systemd/system/heketi.service
    [Unit]
    Description=Heketi Server
    
    [Service]
    Type=simple
    WorkingDirectory=/var/lib/heketi
    User=heketi
    ExecStart=/usr/bin/heketi --config=/etc/heketi/heketi.json
    Restart=on-failure
    StandardOutput=syslog
    StandardError=syslog
    
    [Install]
    WantedBy=multi-user.target
    
    [root@c73 heketi]# cat /etc/heketi/heketi.json
    {
      "_port_comment": "Heketi Server Port Number",
      "port": "18080", ====>指定服务端口
    
      "_use_auth": "Enable JWT authorization. Please enable for deployment",
      "use_auth": true,  ====>指定服务开启验证
    
      "_jwt": "Private keys for access",
      "jwt": {
        "_admin": "Admin has access to all APIs",
        "admin": {
          "key": "admin" ====>指定admin密码
        },
        "_user": "User only has access to /volumes endpoint",
        "user": {
          "key": "admin" ====>指定admin密码
        }
      },
    
      "_glusterfs_comment": "GlusterFS Configuration",
      "glusterfs": {
        "_executor_comment": [
          "Execute plugin. Possible choices: mock, ssh",
          "mock: This setting is used for testing and development.",
          "      It will not send commands to any node.",
          "ssh:  This setting will notify Heketi to ssh to the nodes.",
          "      It will need the values in sshexec to be configured.",
          "kubernetes: Communicate with GlusterFS containers over",
          "            Kubernetes exec api."
        ],
        "executor": "ssh", ====>指定远程连接方式
    
        "_sshexec_comment": "SSH username and private key file information",
        "sshexec": {   ====>指定远程连接方式-细则
          "keyfile": "/etc/heketi/heketi_key", ==>远程登录账号的私钥
          "user": "heketi",
          "port": "22",
          "sudo": true,
          "fstab": "/etc/fstab"
        },
        
        "_db_comment": "Database file name",
        "db": "/var/lib/heketi/heketi.db",
    
        "_loglevel_comment": [
          "Set log level. Choices are:",
          "  none, critical, error, warning, info, debug",
          "Default is warning"
        ],
        "loglevel" : "debug"
      }
    }
    
    [root@c73 heketi]# systemctl start heketi
    [root@c73 heketi]# systemctl status heketi
    ● heketi.service - Heketi Server
       Loaded: loaded (/usr/lib/systemd/system/heketi.service; disabled; vendor preset: disabled)
       Active: active (running) since Tue 2022-07-05 14:45:35 CST; 2h 11min ago
     Main PID: 6512 (heketi)
        Tasks: 8
       Memory: 7.2M
       CGroup: /system.slice/heketi.service
               └─6512 /usr/bin/heketi --config=/etc/heketi/heketi.json
    
    Jul 05 16:55:37 c73 heketi[6512]: Main PID: 698 (glusterd)
    Jul 05 16:55:37 c73 heketi[6512]: CGroup: /system.slice/glusterd.service
    Jul 05 16:55:37 c73 heketi[6512]: ├─ 698 /usr/sbin/glusterd -p /var/run/glusterd.pid --log-level INFO
    Jul 05 16:55:37 c73 heketi[6512]: ├─1355 /usr/sbin/glusterfsd -s 192.168.56.71 --volfile-id vol_872e4da1d2848c164….56.71-v
    Jul 05 16:55:37 c73 heketi[6512]: └─1378 /usr/sbin/glusterfs -s localhost --volfile-id gluster/glustershd -p /var/run/g…hd
    Jul 05 16:55:37 c73 heketi[6512]: Jul 05 07:18:46 c71 systemd[1]: Starting GlusterFS, a clustered file-system server...
    Jul 05 16:55:37 c73 heketi[6512]: Jul 05 07:18:47 c71 systemd[1]: Started GlusterFS, a clustered file-system server.
    Jul 05 16:55:37 c73 heketi[6512]: ]: Stderr []
    Jul 05 16:55:37 c73 heketi[6512]: [heketi] INFO 2022/07/05 16:55:37 Periodic health check status: node 50c111a951...p=true
    Jul 05 16:55:37 c73 heketi[6512]: [heketi] INFO 2022/07/05 16:55:37 Cleaned 0 nodes from health cache
    Hint: Some lines were ellipsized, use -l to show in full.
    

    1.2,在线安装

    #CentOS默认无heketi源,添加源及安装
    yum install -y centos-release-gluster
    yum install -y heketi heketi-client
    

    1.3,测试使用heketi管理gfs

    #1, 第一步,先验证服务是否正常启动
    #2, 第二步,设置免密登录各glusterfs节点,然后用heketi-cli添加各节点并创建一个集群
    #3, 第三步,通过 heketi-cli 创建glusterfs数据卷
    #4, 第四步,登录glusterfs节点,验证结果
    
    [root@c73 heketi]# systemctl start heketi
    # 验证服务是否正常启动
    [root@c73 heketi]# curl http://localhost:18080/hello
    Hello from Heketi
    [root@c73 heketi]# heketi-cli --server http://localhost:18080 --user admin --secret "admin" cluster list
    Clusters:
    #设置默认的环境变量 HEKETI_CLI_SERVER
    [root@gluster-server01 heketi]# echo "export HEKETI_CLI_SERVER=http://192.168.56.73:18080" > /etc/profile.d/heketi.sh
    [root@gluster-server01 heketi]# source /etc/profile.d/heketi.sh
    
    #配置免密登录glusterfs各节点,此处省略具体过程(本机生成密钥,把id_rsa.pub拷贝到gfs各节点的authorized_keys文件,并验证免密登录)
    
    #glusterfs集群节点/dev/sdb1分区:若已被挂载使用,添加则报错
    #glusterfs集群节点/dev/sdb2分区:若未被挂载使用,未被初始化,则添加成功
    #topology-sample.json 可以先写两个glusterfs节点,后期要扩容时再添加一个进来,重新load即可
    [root@c73 heketi]# cat topology-sample.json
    {
        "clusters": [
            {
                "nodes": [
                    {
                        "node": {
                            "hostnames": {
                                "manage": [
                                    "192.168.56.7"
                                ],
                                "storage": [
                                    "192.168.56.7"
                                ]
                            },
                            "zone": 1
                        },
                        "devices": [
                            "/dev/sdb2"
                        ]
                    },
                    {
                        "node": {
                            "hostnames": {
                                "manage": [
                                    "192.168.56.71"
                                ],
                                "storage": [
                                    "192.168.56.71"
                                ]
                            },
                            "zone": 1
                        },
                        "devices": [
                            "/dev/sdb2"
                        ]
                    }              
                ]
            }
        ]
    }
    [root@c73 heketi]#  heketi-cli topology load --json=topology-sample.json
    Error: Unable to get topology information: Invalid JWT token: Token missing iss claim
    #这是因为新版本的 heketi 在创建 gfs 集群时需要带上参数,声明用户名及密码,相应值在 heketi.json 文件中配置
    [root@c73 heketi]# heketi-cli --user admin --secret admin topology load --json topology-sample.json
    Creating cluster ... ID: 61f067af620b7bd2a8cbc3f375b19c8c
            Allowing file volumes on cluster.
            Allowing block volumes on cluster.
            Creating node 192.168.56.7 ... ID: 023eca1214a5d9e24af56d1fa4aaceb8
                    Adding device /dev/sdb1 ... Unable to add device: Setup of device /dev/sdb1 failed (already initialized or contains data?):   Can't open /dev/sdb1 exclusively.  Mounted filesystem?
      Can't open /dev/sdb1 exclusively.  Mounted filesystem?
            Creating node 192.168.56.71 ... ID: 50c111a95160da5a85d944f98c888a05
                    Adding device /dev/sdb1 ... Unable to add device: Setup of device /dev/sdb1 failed (already initialized or contains data?):   Can't open /dev/sdb1 exclusively.  Mounted filesystem?
      Can't open /dev/sdb1 exclusively.  Mounted filesystem?
    
    [root@c73 heketi]# heketi-cli --user admin --secret admin  topology load --json topology-sample.json
            Found node 192.168.56.7 on cluster 61f067af620b7bd2a8cbc3f375b19c8c
                    Adding device /dev/sdb2 ... OK
            Found node 192.168.56.71 on cluster 61f067af620b7bd2a8cbc3f375b19c8c
                    Adding device /dev/sdb2 ... OK
    
      
    
    #创建一个1G大小,2副本的数据卷  --server http://localhost:18080
    [root@c73 heketi]# heketi-cli  --user admin --secret "admin" cluster list
    Clusters:
    Id:61f067af620b7bd2a8cbc3f375b19c8c [file][block]
    		
    [root@c73 heketi]# heketi-cli  --user admin --secret admin volume create --size=1  --replica=2 --clusters=61f067af620b7bd2a8cbc3f375b19c8c
    Name: vol_872e4da1d2848c1645ab30c21fc05e3d
    Size: 1
    Volume Id: 872e4da1d2848c1645ab30c21fc05e3d
    Cluster Id: 61f067af620b7bd2a8cbc3f375b19c8c
    Mount: 192.168.56.7:vol_872e4da1d2848c1645ab30c21fc05e3d
    Mount Options: backup-volfile-servers=192.168.56.71
    Block: false
    Free Size: 0
    Reserved Size: 0
    Block Hosting Restriction: (none)
    Block Volumes: []
    Durability Type: replicate
    Distribute Count: 1
    Replica Count: 2
    
    #登录glusterfs节点验证---(c72未添加到topology-sample.json, c71添加了)---
    [root@c72 ~]# gluster volume list
    v2
    vol_872e4da1d2848c1645ab30c21fc05e3d
    [root@c72 ~]# lsblk
    NAME   MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
    sda      8:0    0  40G  0 disk
    └─sda1   8:1    0  40G  0 part /
    sdb      8:16   0  20G  0 disk
    ├─sdb1   8:17   0  10G  0 part /export
    └─sdb2   8:18   0   4G  0 part
    
    [root@c71 ~]# lsblk
    NAME                                                                                MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
    sda                                                                                   8:0    0  40G  0 disk
    └─sda1                                                                                8:1    0  40G  0 part /
    sdb                                                                                   8:16   0  20G  0 disk
    ├─sdb1                                                                                8:17   0  10G  0 part /export
    └─sdb2                                                                                8:18   0   4G  0 part
      ├─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9_tmeta   253:0    0   8M  0 lvm
      │ └─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9-tpool 253:2    0   1G  0 lvm
      │   ├─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9     253:3    0   1G  1 lvm
      │   └─vg_349d0d051c37d48c2be4b13f69b98b88-brick_f2345876c46be5d44d90ceec79668826  253:4    0   1G  0 lvm  /var/lib/heket
      └─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9_tdata   253:1    0   1G  0 lvm
        └─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9-tpool 253:2    0   1G  0 lvm
          ├─vg_349d0d051c37d48c2be4b13f69b98b88-tp_ea5ad4d332de7de61e57943f5e4a7dd9     253:3    0   1G  1 lvm
          └─vg_349d0d051c37d48c2be4b13f69b98b88-brick_f2345876c46be5d44d90ceec79668826  253:4    0   1G  0 lvm  /var/lib/heket
    

    2,使用k8s创建glusterfs数据卷

    [root@JXQ-11-243-32-216 ~]# kubectl create ns heketi-gfs
    
    [root@JXQ-11-243-32-216 ~]# cat a.yaml 
    ---
    apiVersion: v1
    kind: Secret
    metadata:
      name: heketi-secret
      namespace: heketi-gfs
    data:
      # base64 encoded password. E.g.: echo -n "mypassword" | base64
      key: YWRtaW4=
    type: kubernetes.io/glusterfs
    
    ---
    apiVersion: storage.k8s.io/v1beta1
    kind: StorageClass
    metadata:
      name: glusterfs
      namespace: heketi-gfs
    provisioner: kubernetes.io/glusterfs
    #reclaimPolicy: Delete #默认值
    reclaimPolicy: Retain
    allowVolumeExpansion: true
    parameters:
      resturl: "http://11.243.32.215:18080"
      clusterid: "e5d93c15f7a98cfd60a9d5f9d5817048"
      restauthenabled: "true"
      restuser: "admin"
      #secretNamespace: "heketi-gfs"
      #secretName: "heketi-secret"
      restuserkey: "admin"
      gidMin: "40000"
      gidMax: "50000"
      volumetype: "replicate:2"
    
    [root@JXQ-11-243-32-216 ~]# kubectl apply -f a.yaml 
    secret/heketi-secret created
    storageclass.storage.k8s.io/glusterfs created
    
    [root@JXQ-11-243-32-216 ~]# kubectl get storageclass -n heketi-gfs
    NAME        PROVISIONER               RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
    glusterfs   kubernetes.io/glusterfs   Retain          Immediate           true                   4s
    
  • 相关阅读:
    HTML5+CSS3小实例:侧边导航栏
    已解决 Bug——IndexError: index 3 is out of bounds for axis 0 with size 3问题
    鼠标悬浮在进度条上时视频预览的实现
    封装unordered_map和unordered_set
    synchronized详解
    【API篇】十一、Flink水位线传递与迟到数据处理
    前端问题整理
    window和linux下载ffmpeg
    分享公司企业官网展示小程序开发制作功能介绍
    java可以跨平台的原因是什么?
  • 原文地址:https://blog.csdn.net/eyeofeagle/article/details/125623215