• k8s-master high availability



    1. Installing the high-availability components

    Preparation

    1. Configure host resolution on all nodes
    [root@vms124 ~]# cat /etc/hosts
    192.168.26.124 k8s-master01
    192.168.26.125 k8s-master02
    192.168.26.126 k8s-master03
    
    2. Synchronize time
    [root@vms124 ~]# rpm -ivh http://mirrors.wlnmp.com/centos/wlnmp-release-centos.noarch.rpm 
    [root@vms124 ~]# yum install ntpdate -y
    [root@vms124 ~]# ln -sf /usr/share/zoneinfo/Asia/Shanghai  /etc/localtime 
    [root@vms124 ~]# echo 'Asia/Shanghai' > /etc/timezone
    [root@vms124 ~]# ntpdate time2.aliyun.com
    [root@vms124 ~]# cat >> /var/spool/cron/root <<EOF
    */5 * * * * /usr/sbin/ntpdate time2.aliyun.com
    EOF
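
    A quick check like the following (a minimal sketch) can confirm on each node that the timezone and the ntpdate cron entry from the steps above are in place:

    # show the current time/timezone and the scheduled ntpdate job
    date
    crontab -l | grep ntpdate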
    

    Install and configure keepalived and HAProxy

    1. Install keepalived and HAProxy on all master nodes
    [root@vms124 ~]# yum install keepalived haproxy -y
    
    2. Configure HAProxy on all master nodes (the same configuration on every node)
    [root@vms124 ~]# cat > /etc/haproxy/haproxy.cfg <<EOF
    global
    
        log         127.0.0.1 local0 err
        maxconn     4000
        ulimit-n 16384
        stats timeout 30s
    
    
    defaults
        mode                    http
        log                     global
        option                  httplog
        timeout http-request    15s
        timeout queue           1m
        timeout connect         10s
        timeout client          5000
        timeout server          5000
        timeout http-keep-alive 15s
    
    frontend monitor-in
        bind *:33305
        mode http
        option httplog
        monitor-uri /monitor
    
    frontend  k8s-master
        bind 0.0.0.0:16443
        bind 127.0.0.1:16443
        mode tcp
        option tcplog
        tcp-request inspect-delay 5s
        default_backend   k8s-master
    
    
    backend k8s-master
        mode tcp
        option tcplog
        option tcp-check
        balance     roundrobin
        default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
        server  k8s-master01 192.168.26.124:6443 check
        server  k8s-master02 192.168.26.125:6443 check
        server  k8s-master03 192.168.26.126:6443 check
    EOF
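
    Before starting the service, the file can be validated with HAProxy's built-in syntax check (a minimal sketch; exit code 0 means the configuration parses cleanly):

    # validate /etc/haproxy/haproxy.cfg without starting the service
    haproxy -c -f /etc/haproxy/haproxy.cfg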
    

    Configure keepalived on all master nodes. Because keepalived needs the node's own IP address and network interface name, the configuration differs on every node.
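
    The interface name (ens32 in the configs below) and the node's own IP must match the actual machine; they can be confirmed before editing the file, for example (a minimal sketch):

    # list interfaces with their addresses to find the NIC name and local IP
    ip -br addr show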

    1. Master01 configuration
    [root@vms124 ~]# cat /etc/keepalived/keepalived.conf
    ! Configuration File for keepalived
    
    global_defs {
        router_id LVS_DEVEL
        script_user root
        enable_script_security
    }
    
    vrrp_script chk_apiserver {
        script "/etc/keepalived/check_apiserver.sh"
        interval 5
        weight -5
        fall 2
        rise 1
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface ens32
        mcast_src_ip 192.168.26.124
        virtual_router_id 51
        priority 100
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass K8SHA_KA_AUTH
        }
        virtual_ipaddress {
            192.168.26.16
        }
        track_script {
            chk_apiserver
        }
    }
    
    2. Master02 configuration
    ! Configuration File for keepalived
    
    global_defs {
        router_id LVS_DEVEL
        script_user root
        enable_script_security
    }
    
    vrrp_script chk_apiserver {
        script "/etc/keepalived/check_apiserver.sh"
        interval 5
        weight -5
        fall 2
        rise 1
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface ens32
        mcast_src_ip 192.168.26.125
        virtual_router_id 51
        priority 100
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass K8SHA_KA_AUTH
        }
        virtual_ipaddress {
            192.168.26.16
        }
        track_script {
            chk_apiserver
        }
    }
    
    3. Master03 configuration
    ! Configuration File for keepalived
    
    global_defs {
        router_id LVS_DEVEL
        script_user root
        enable_script_security
    }
    
    vrrp_script chk_apiserver {
        script "/etc/keepalived/check_apiserver.sh"
        interval 5
        weight -5
        fall 2
        rise 1
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface ens32
        mcast_src_ip 192.168.26.126
        virtual_router_id 51
        priority 100
        advert_int 2
        authentication {
            auth_type PASS
            auth_pass K8SHA_KA_AUTH
        }
        virtual_ipaddress {
            192.168.26.16
        }
        track_script {
            chk_apiserver
        }
    }
    

    Configure the keepalived health-check script on all master nodes

    1. The check script
    [root@vms124 ~]# cat /etc/keepalived/check_apiserver.sh 
    #!/bin/bash
    # Fail the node over when HAProxy is down: look for a haproxy process
    # up to 3 times, one second apart.

    err=0
    for k in $(seq 1 3)
    do
        check_code=$(pgrep haproxy)
        if [[ $check_code == "" ]]; then
            # haproxy is not running; count the failure and retry
            err=$(expr $err + 1)
            sleep 1
            continue
        else
            err=0
            break
        fi
    done
    
    if [[ $err != "0" ]]; then
        # haproxy stayed down: stop keepalived so the VIP moves to another node
        echo "systemctl stop keepalived"
        /usr/bin/systemctl stop keepalived
        exit 1
    else
        exit 0
    fi
    
    2. Make the script executable
    [root@vms124 ~]# chmod +x /etc/keepalived/check_apiserver.sh
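
    The script can also be run by hand as a sanity check (a sketch; with HAProxy running it should print 0):

    # run the health check manually and show its exit code
    /etc/keepalived/check_apiserver.sh; echo $?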
    

    Start HAProxy and keepalived on all nodes

    [root@vms124 ~]# systemctl daemon-reload 
    [root@vms124 ~]# systemctl enable --now haproxy.service 
    Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
    [root@vms124 ~]# systemctl enable --now keepalived.service 
    Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
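
    As an additional check (a sketch using the ens32 interface and VIP configured above), HAProxy should be listening on port 16443 on every node, and exactly one node should hold the VIP:

    # confirm the api-server front-end port is listening
    ss -ntlp | grep 16443
    # the VIP should appear on exactly one node's ens32
    ip addr show ens32 | grep 192.168.26.16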
    
    

    Test whether the VIP is reachable

    1. Ping test from all nodes
    [root@vms124 ~]# ping 192.168.26.16  -c 4
    PING 192.168.26.16 (192.168.26.16) 56(84) bytes of data.
    64 bytes from 192.168.26.16: icmp_seq=1 ttl=64 time=1.39 ms
    64 bytes from 192.168.26.16: icmp_seq=2 ttl=64 time=2.26 ms
    64 bytes from 192.168.26.16: icmp_seq=3 ttl=64 time=0.369 ms
    64 bytes from 192.168.26.16: icmp_seq=4 ttl=64 time=1.21 ms
    
    --- 192.168.26.16 ping statistics ---
    4 packets transmitted, 4 received, 0% packet loss, time 3004ms
    rtt min/avg/max/mdev = 0.369/1.312/2.265/0.674 ms
    
    2. Telnet test from all nodes
    [root@vms124 haproxy]# telnet 192.168.26.16 16443
    Trying 192.168.26.16...
    Connected to 192.168.26.16.
    Escape character is '^]'.
    Connection closed by foreign host.
    
    # If ping or telnet fails, check the firewall, SELinux, and the status of HAProxy and keepalived
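
    The monitor front-end defined in the HAProxy configuration can also be probed as a further check (a sketch using the monitor-uri from above; it should return HTTP 200 while HAProxy is healthy):

    # probe HAProxy's monitor endpoint on the local node
    curl -i http://127.0.0.1:33305/monitor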
    

    2. Cluster initialization

    Create kubeadm-config.yaml on the Master01 node

    1. Generate the default configuration file
    [root@vms124 haproxy]# kubeadm config print init-defaults > kubeadm-config.yaml
    
    2. Edit the configuration file
    [root@vms124 ~]# vim kubeadm-config.yaml
    apiVersion: kubeadm.k8s.io/v1beta3
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 192.168.26.124                 # change to this node's IP
      bindPort: 6443
    nodeRegistration:
      criSocket: /var/run/dockershim.sock             # /var/run/dockershim.sock if the runtime is Docker, /run/containerd/containerd.sock if it is containerd
      imagePullPolicy: IfNotPresent
      name: k8s-master01
      taints: 
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      certSANs:
      - 192.168.26.16                                  # change to the VIP
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta3
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: 192.168.26.16:16443           # change to VIP:port
    controllerManager: {}
    dns: {}
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: 1.23.2               # set to match the version reported by kubeadm version
    networking:
      dnsDomain: cluster.local
      serviceSubnet: 10.96.0.0/12       
      podSubnet: 172.16.0.0/12        
    scheduler: {}
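
    The images implied by this configuration can be previewed before initializing (a minimal sketch; requires network access to the imageRepository above):

    # list the control-plane images kubeadm will use for this config
    kubeadm config images list --config kubeadm-config.yaml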
    
    

    Copy kubeadm-config.yaml to the other master nodes

    [root@vms124 haproxy]# for i in k8s-master02 k8s-master03;do scp kubeadm-config.yaml  $i:/root/;done
    kubeadm-config.yaml                                                                       100% 1212   591.2KB/s   00:00    
    kubeadm-config.yaml                                                                       100% 1212   769.3KB/s   00:00  
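
    Optionally, the images can be pulled ahead of time on every master to speed up init and join (a sketch using the copied configuration file):

    # pre-pull control-plane images on each master node
    kubeadm config images pull --config /root/kubeadm-config.yaml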
    

    Initialize the Master01 node. After initialization, the corresponding certificates and configuration files are generated under /etc/kubernetes; the other nodes can then join Master01.

    [root@vms124 haproxy]# kubeadm init --config  /root/kubeadm-config.yaml --upload-certs
    ...
    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Alternatively, if you are the root user, you can run:
    
      export KUBECONFIG=/etc/kubernetes/admin.conf
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    You can now join any number of the control-plane node running the following command on each as root:
    
      kubeadm join 192.168.26.16:16443 --token abcdef.0123456789abcdef \
    	--discovery-token-ca-cert-hash sha256:5cd90d70bc4fa6c4dc921a6b5d1eada0ff34339ea772440aa2d9889d87597a6a \
    	--control-plane --certificate-key 5a09d08a684d25e5263746ea5cb5e8625f549a364b47124777bdd2e0b795c2df
    
    Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
    As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
    "kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 192.168.26.16:16443 --token abcdef.0123456789abcdef \
    	--discovery-token-ca-cert-hash sha256:5cd90d70bc4fa6c4dc921a6b5d1eada0ff34339ea772440aa2d9889d87597a6a 
    

    Join the other master nodes

      kubeadm join 192.168.26.16:16443 --token abcdef.0123456789abcdef \
    	--discovery-token-ca-cert-hash sha256:5cd90d70bc4fa6c4dc921a6b5d1eada0ff34339ea772440aa2d9889d87597a6a \
    	--control-plane --certificate-key 5a09d08a684d25e5263746ea5cb5e8625f549a364b47124777bdd2e0b795c2df
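
    To use kubectl on the newly joined masters as well, the kubeconfig can be set up the same way as shown in the init output (a sketch, run as root on each joined master):

    # copy the admin kubeconfig generated by the control-plane join
    mkdir -p $HOME/.kube
    cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    chown $(id -u):$(id -g) $HOME/.kube/config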
    

    If initialization fails, clean up with the following commands and then initialize again:

    [root@vms124 haproxy]# kubeadm reset -f ; ipvsadm --clear ; rm -rf ~/.kube
    

    After all three master nodes have joined, check the status

    [root@vms124 haproxy]# kubectl get node
    NAME           STATUS     ROLES                  AGE     VERSION
    k8s-master01   NotReady   control-plane,master   6m34s   v1.23.2
    k8s-master02   NotReady   control-plane,master   22s     v1.23.2
    k8s-master03   NotReady   control-plane,master   97s     v1.23.2
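
    The nodes stay NotReady until a CNI plugin is installed; meanwhile the control-plane pods themselves can be inspected (a sketch):

    # core components should be Running; coredns stays Pending until a CNI is deployed
    kubectl get pods -n kube-system -o wide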
    

    After installing Calico, all nodes become Ready
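
    A minimal sketch of the Calico step implied here, assuming the upstream manifest is used and CALICO_IPV4POOL_CIDR is set to the podSubnet (172.16.0.0/12) from kubeadm-config.yaml:

    # download the Calico manifest (a commonly used location; pin a version compatible with the cluster)
    curl -LO https://docs.projectcalico.org/manifests/calico.yaml
    # edit calico.yaml: uncomment CALICO_IPV4POOL_CIDR and set its value to 172.16.0.0/12
    vim calico.yaml
    kubectl apply -f calico.yaml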

    [root@vms124 calico]# kubectl get no
    NAME           STATUS   ROLES                  AGE   VERSION
    k8s-master01   Ready    control-plane,master   22m   v1.23.2
    k8s-master02   Ready    control-plane,master   16m   v1.23.2
    k8s-master03   Ready    control-plane,master   17m   v1.23.2
    
    
  • Original article: https://blog.csdn.net/weixin_45081220/article/details/127090136