• Minio分布式集群部署(Swarm)


    环境准备

    四台虚拟机

    • 192.168.2.38(管理节点)
    • 192.168.2.81(工作节点)
    • 192.168.2.100(工作节点)
    • 192.168.2.102(工作节点)

    时间同步

    yum install -y ntp
    cat <<EOF>>/var/spool/cron/root
    00 12 * * * /usr/sbin/ntpdate -u ntp1.aliyun.com && /usr/sbin/hwclock -w
    EOF
    ##查看计划任务
    crontab -l
    ##手动执行
    /usr/sbin/ntpdate -u ntp1.aliyun.com && /usr/sbin/hwclock -w
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8

    Docker

    安装Docker

    curl -sSL https://get.daocloud.io/docker | sh
    
    • 1

    启动docker

    sudo systemctl start docker
    sudo systemctl enable docker
    
    • 1
    • 2

    搭建Swarm集群

    打开防火墙(Swarm需要)

    • 管理节点打开2377

      # manager
      firewall-cmd --zone=public --add-port=2377/tcp --permanent
      
      • 1
      • 2
    • 所有节点打开以下端口

      # 所有node
      firewall-cmd --zone=public --add-port=7946/tcp --permanent
      firewall-cmd --zone=public --add-port=7946/udp --permanent
      firewall-cmd --zone=public --add-port=4789/tcp --permanent
      firewall-cmd --zone=public --add-port=4789/udp --permanent
      
      • 1
      • 2
      • 3
      • 4
      • 5
    • 所有节点重启防火墙

      # 所有node
      firewall-cmd --reload
      systemctl restart docker
      
      • 1
      • 2
      • 3
    • 图个方便可以直接关闭防火墙

    创建Swarm

    docker swarm init --advertise-addr your_manager_ip
    
    • 1

    加入Swarm

    docker swarm join --token SWMTKN-1-51b7t8whxn8j6mdjt5perjmec9u8qguxq8tern9nill737pra2-ejc5nw5f90oz6xldcbmrl2ztu 192.168.2.38:2377
    #查看节点
    docker node ls
    
    • 1
    • 2
    • 3
    • 4
    • 5

    服务约束

    添加label

    sudo docker node update --label-add minio1=true 管理节点名称
    sudo docker node update --label-add minio2=true 工作节点名称
    sudo docker node update --label-add minio3=true 工作节点名称
    sudo docker node update --label-add minio4=true 工作节点名称
    
    • 1
    • 2
    • 3
    • 4

    image-20220821202835175

    为MinIO创建Docker secret

    echo "minioadmin" | docker secret create access_key -
    echo "12345678" | docker secret create secret_key -
    
    • 1
    • 2

    Minio集群部署文件

    创建文件存放目录

    管理节点执行

    cd /root
    mkdir minio-swarm
    vi docker-compose-nginx.yml
    
    • 1
    • 2
    • 3

    Docker-Compose.yml

    # Docker Swarm stack: distributed MinIO across 4 nodes, fronted by nginx.
    # Each minioN service is pinned to its own node via the node.labels.minioN
    # labels added earlier with `docker node update --label-add`.
    version: '3.7'

    services:

      # Reverse proxy in front of the MinIO cluster.
      # Host 9000 -> S3 API upstream; host 9090 -> container 80 -> console upstream.
      nginx:
        image: nginx
        hostname: minionginx
        volumes:
          # The config file must exist at this host path on the node matching
          # the placement constraint below (the manager labelled minio1).
          - /root/minio-swarm/conf/swarm-nginx.conf:/etc/nginx/nginx.conf
        ports:
          - "9090:80"
          - "9000:9000"
        deploy:
          replicas: 1
          restart_policy:
            delay: 10s
            max_attempts: 10
            window: 60s
          placement:
            constraints:
              - node.labels.minio1==true
          resources:
            limits:
              # cpus: '0.001'
              memory: 1024M
            reservations:
              # cpus: '0.001'
              memory: 64M
        networks:
          - minio_distributed
        # NOTE(review): depends_on is ignored by `docker stack deploy` (swarm
        # mode), so startup ordering relative to the minio services is not
        # actually guaranteed here.
        depends_on:
          - minio1
          - minio2
          - minio3
          - minio4

      # MinIO server 1 of 4, pinned to the node labelled minio1=true.
      # Two named volumes per node give the cluster 4 x 2 = 8 drives in total.
      minio1:
        image: quay.io/minio/minio:RELEASE.2022-02-12T00-51-25Z
        hostname: minio1
        volumes:
          - data1-1:/data1
          - data1-2:/data2
        deploy:
          replicas: 1
          restart_policy:
            delay: 10s
            max_attempts: 10
            window: 60s
          placement:
            constraints:
              - node.labels.minio1==true
          resources:
            limits:
              memory: 2048M
            reservations:
              memory: 512M
        # The {1...4}/{1...2} expansion enumerates all 8 drives of the cluster;
        # every node must run this identical server command line.
        command: server --console-address ":9001" http://minio{1...4}/data{1...2}
        networks:
          - minio_distributed
        # Credentials come from the Docker secrets named exactly
        # access_key / secret_key (created earlier with `docker secret create`).
        secrets:
          - secret_key
          - access_key
        healthcheck:
          test:
            [
              "CMD",
              "curl",
              "-f",
              "http://localhost:9000/minio/health/live"
            ]
          interval: 30s
          timeout: 20s
          retries: 3

      # MinIO server 2 of 4 -- identical to minio1 except for label and volumes.
      minio2:
        image: quay.io/minio/minio:RELEASE.2022-02-12T00-51-25Z
        hostname: minio2
        volumes:
          - data2-1:/data1
          - data2-2:/data2
        deploy:
          replicas: 1
          restart_policy:
            delay: 10s
            max_attempts: 10
            window: 60s
          placement:
            constraints:
              - node.labels.minio2==true
          resources:
            limits:
              memory: 2048M
            reservations:
              memory: 512M
        command: server --console-address ":9001" http://minio{1...4}/data{1...2}
        networks:
          - minio_distributed
        secrets:
          - secret_key
          - access_key
        healthcheck:
          test:
            [
              "CMD",
              "curl",
              "-f",
              "http://localhost:9000/minio/health/live"
            ]
          interval: 30s
          timeout: 20s
          retries: 3

      # MinIO server 3 of 4 -- identical to minio1 except for label and volumes.
      minio3:
        image: quay.io/minio/minio:RELEASE.2022-02-12T00-51-25Z
        hostname: minio3
        volumes:
          - data3-1:/data1
          - data3-2:/data2
        deploy:
          replicas: 1
          restart_policy:
            delay: 10s
            max_attempts: 10
            window: 60s
          placement:
            constraints:
              - node.labels.minio3==true
          resources:
            limits:
              memory: 2048M
            reservations:
              memory: 512M
        command: server --console-address ":9001" http://minio{1...4}/data{1...2}
        networks:
          - minio_distributed
        secrets:
          - secret_key
          - access_key
        healthcheck:
          test:
            [
              "CMD",
              "curl",
              "-f",
              "http://localhost:9000/minio/health/live"
            ]
          interval: 30s
          timeout: 20s
          retries: 3

      # MinIO server 4 of 4 -- identical to minio1 except for label and volumes.
      minio4:
        image: quay.io/minio/minio:RELEASE.2022-02-12T00-51-25Z
        hostname: minio4
        volumes:
          - data4-1:/data1
          - data4-2:/data2
        deploy:
          replicas: 1
          restart_policy:
            delay: 10s
            max_attempts: 10
            window: 60s
          placement:
            constraints:
              - node.labels.minio4==true
          resources:
            limits:
              memory: 2048M
            reservations:
              memory: 512M
        command: server --console-address ":9001" http://minio{1...4}/data{1...2}
        networks:
          - minio_distributed
        secrets:
          - secret_key
          - access_key
        healthcheck:
          test:
            [
              "CMD",
              "curl",
              "-f",
              "http://localhost:9000/minio/health/live"
            ]
          interval: 30s
          timeout: 20s
          retries: 3

    # Named volumes are created locally on whichever node each service lands on.
    volumes:
      data1-1:
      data1-2:
      data2-1:
      data2-2:
      data3-1:
      data3-2:
      data4-1:
      data4-2:


    # Overlay network so services can resolve each other by service name.
    networks:
      minio_distributed:
        driver: overlay

    # external: true -- the secrets must already exist in the swarm
    # (created earlier with `docker secret create`).
    secrets:
      secret_key:
        external: true
      access_key:
        external: true
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
    • 35
    • 36
    • 37
    • 38
    • 39
    • 40
    • 41
    • 42
    • 43
    • 44
    • 45
    • 46
    • 47
    • 48
    • 49
    • 50
    • 51
    • 52
    • 53
    • 54
    • 55
    • 56
    • 57
    • 58
    • 59
    • 60
    • 61
    • 62
    • 63
    • 64
    • 65
    • 66
    • 67
    • 68
    • 69
    • 70
    • 71
    • 72
    • 73
    • 74
    • 75
    • 76
    • 77
    • 78
    • 79
    • 80
    • 81
    • 82
    • 83
    • 84
    • 85
    • 86
    • 87
    • 88
    • 89
    • 90
    • 91
    • 92
    • 93
    • 94
    • 95
    • 96
    • 97
    • 98
    • 99
    • 100
    • 101
    • 102
    • 103
    • 104
    • 105
    • 106
    • 107
    • 108
    • 109
    • 110
    • 111
    • 112
    • 113
    • 114
    • 115
    • 116
    • 117
    • 118
    • 119
    • 120
    • 121
    • 122
    • 123
    • 124
    • 125
    • 126
    • 127
    • 128
    • 129
    • 130
    • 131
    • 132
    • 133
    • 134
    • 135
    • 136
    • 137
    • 138
    • 139
    • 140
    • 141
    • 142
    • 143
    • 144
    • 145
    • 146
    • 147
    • 148
    • 149
    • 150
    • 151
    • 152
    • 153
    • 154
    • 155
    • 156
    • 157
    • 158
    • 159
    • 160
    • 161
    • 162
    • 163
    • 164
    • 165
    • 166
    • 167
    • 168
    • 169
    • 170
    • 171
    • 172
    • 173
    • 174
    • 175
    • 176
    • 177
    • 178
    • 179
    • 180
    • 181
    • 182
    • 183
    • 184
    • 185
    • 186
    • 187
    • 188
    • 189
    • 190
    • 191
    • 192
    • 193
    • 194
    • 195
    • 196
    • 197
    • 198
    • 199
    • 200
    • 201
    • 202
    • 203
    • 204
    • 205
    • 206
    • 207
    • 208
    • 209

    说明:

    • secret_key和access_key由上一步通过docker secret create xxx - 创建的
    • 一个节点上最好部署一个minio服务,如果部署多个可能会出现磁盘被占用的情况,所以最好是增加机器再部署

    nginx.conf

    创建目录

    cd /root/minio-swarm
    mkdir conf
    cd conf
    vi swarm-nginx.conf
    
    • 1
    • 2
    • 3
    • 4

    如果需要增加集群的节点,需要在Upstream中添加新节点的服务名:9001

    # nginx reverse proxy for the MinIO Swarm stack.
    # listen 9000: load-balances the S3 API across all 4 MinIO servers.
    # listen 80 (published as host 9090): load-balances the MinIO web console.
    user  nginx;
    worker_processes  auto;

    error_log  /var/log/nginx/error.log warn;
    pid        /var/run/nginx.pid;

    events {
        worker_connections  4096;
    }

    http {
        include       /etc/nginx/mime.types;
        default_type  application/octet-stream;

        log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                          '$status $body_bytes_sent "$http_referer" '
                          '"$http_user_agent" "$http_x_forwarded_for"';

        access_log  /var/log/nginx/access.log  main;
        sendfile        on;
        keepalive_timeout  65;

        # S3 API backends -- Swarm service names resolved via the overlay
        # network DNS. Add new cluster members here when scaling out.
        upstream minio {
            server minio1:9000;
            server minio2:9000;
            server minio3:9000;
            server minio4:9000;
        }
        server {
            listen       9000;
            listen  [::]:9000;
            server_name  localhost;

            # To allow special characters in headers
            ignore_invalid_headers off;
            # Allow any size file to be uploaded.
            # Set to a value such as 1000m; to restrict file size to a specific value
            client_max_body_size 0;
            # To disable buffering
            proxy_buffering off;
            proxy_request_buffering off;

            location / {
                proxy_set_header Host $http_host;
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_set_header X-Forwarded-Proto $scheme;

                proxy_connect_timeout 300;
                # Default is HTTP/1, keepalive is only enabled in HTTP/1.1
                proxy_http_version 1.1;
                proxy_set_header Connection "";
                chunked_transfer_encoding off;

                proxy_pass http://minio;
            }
        }
        # include /etc/nginx/conf.d/*.conf;

        # Web console backends (the --console-address ":9001" port).
        upstream console {
            server minio1:9001;
            server minio2:9001;
            server minio3:9001;
            server minio4:9001;
        }

        server {
            listen       80;
            listen  [::]:80;
            server_name  localhost;

            # To allow special characters in headers
            ignore_invalid_headers off;
            # Allow any size file to be uploaded.
            # Set to a value such as 1000m; to restrict file size to a specific value
            client_max_body_size 0;
            # To disable buffering
            proxy_buffering off;

            location / {
                proxy_connect_timeout 5;
                proxy_send_timeout 10;
                proxy_read_timeout 10;

                proxy_set_header Host $http_host;
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_set_header X-Forwarded-Proto $scheme;

                # Default is HTTP/1, keepalive is only enabled in HTTP/1.1
                proxy_http_version 1.1;
                proxy_set_header Connection "";
                chunked_transfer_encoding off;

                proxy_pass http://console;
            }
        }

    }
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
    • 35
    • 36
    • 37
    • 38
    • 39
    • 40
    • 41
    • 42
    • 43
    • 44
    • 45
    • 46
    • 47
    • 48
    • 49
    • 50
    • 51
    • 52
    • 53
    • 54
    • 55
    • 56
    • 57
    • 58
    • 59
    • 60
    • 61
    • 62
    • 63
    • 64
    • 65
    • 66
    • 67
    • 68
    • 69
    • 70
    • 71
    • 72
    • 73
    • 74
    • 75
    • 76
    • 77
    • 78
    • 79
    • 80
    • 81
    • 82
    • 83
    • 84
    • 85
    • 86
    • 87
    • 88
    • 89
    • 90
    • 91
    • 92
    • 93
    • 94
    • 95
    • 96
    • 97
    • 98
    • 99

    部署

    cd /root/minio-swarm
    docker stack deploy -c docker-compose-nginx.yml minio-swarm
    
    • 1
    • 2

    测试

    浏览器访问地址http://192.168.2.81:9090

    [建议将图片保存下来直接上传(img-3xamwr5f-1661332785682)(https://note.youdao.com/yws/res/a/WEBRESOURCE14276a22c9fd8a782b54a00d658a619a)]

    一个节点宕机

    模拟其中一个节点宕机,看能否正常读取数据(minio集群的写入需要至少4个在线磁盘,如果是两个节点的集群,一个节点宕机,那么集群就只能读取,无法写入)

    如果是一个有N块硬盘的分布式Minio,只要有N/2硬盘在线,你的数据就是可以读取的。不过你需要至少有N/2+1个硬盘来创建新的对象。

    [root@test redis-swarm2]# docker service ls
    ID             NAME                      MODE         REPLICAS   IMAGE                                              PORTS
    l317d9wc49tt   minio-swarm_minio1        replicated   1/1        quay.io/minio/minio:RELEASE.2022-02-12T00-51-25Z   
    x2gj6ert03tj   minio-swarm_minio2        replicated   1/1        quay.io/minio/minio:RELEASE.2022-02-12T00-51-25Z   
    z624sonlnk02   minio-swarm_minio3        replicated   1/1        quay.io/minio/minio:RELEASE.2022-02-12T00-51-25Z   
    xu0gx8mbjocm   minio-swarm_minio4        replicated   1/1        quay.io/minio/minio:RELEASE.2022-02-12T00-51-25Z   
    53w8cpjpe7wd   minio-swarm_nginx         replicated   1/1        nginx:latest                                       *:9000->9000/tcp, *:9090->80/tcp
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7

    现在将其中一台服务器停机处理,刷新浏览器

    [(img-FwWrl71k-1661332785684)(https://note.youdao.com/yws/res/3/WEBRESOURCE83d127166e1b66d6493d7c548548ab73)]

    可以正常写入和读取数据

    在这里插入图片描述

    两个节点宕机

    API可以正常读取数据。如果你退出了minio控制台回到登录界面,则无法再次登录进去。如果没有退出,那你仍然可以查看数据,但无法上传数据。

    [建议将图片保存下来直接上传(img-7AQEj6Gj-1661332785685)(https://note.youdao.com/yws/res/6/WEBRESOURCE02f428d4fb8e44a28152aa17eaf98686)]

    注意: 如果要模拟节点宕机,至少需要3台机器,如果是两台,模拟宕机一台,另一台是无法写入的

    [(img-hc5y7xyb-1661332785687)(https://note.youdao.com/yws/res/b/WEBRESOURCEbeb19c65295de056f5f79441963c7ecb)]

  • 相关阅读:
    C++11—关于引用Reference
    Python超入门(3)__迅速上手操作掌握Python
    【AI 编程助手】DevChat 解析:深入了解、快速配置与实际操作案例的完整指南
    EasyRecovery数据恢复软件2024最新版包括Windows和Mac
    IntentFilter笔记
    整合视图层(Thymeleaf的使用)
    MATLAB数学运算
    基于python的django框架选题推荐企业信息招聘评价管理系统
    k8s创建tomcat容器deployment的yaml
    2023年电工(中级)证模拟考试题库及电工(中级)理论考试试题
  • 原文地址:https://blog.csdn.net/hyx1229/article/details/126466839