• kubernetes集群编排(13)


    目录

    k8s高可用集群

    haproxy负载均衡

    pacemaker高可用

    部署control-plane

    部署worker node


    k8s高可用集群

    实验环境

    主机名

    IP

    角色

    k8s1

    192.168.81.10

    harbor

    k8s2

    192.168.81.11

    control-plane

    k8s3

    192.168.81.12

    control-plane

    k8s4

    192.168.81.13

    control-plane

    k8s5

    192.168.81.14

    haproxy,pacemaker

    k8s6

    192.168.81.15

    haproxy,pacemaker

    k8s7

    192.168.81.16

    worker node

    haproxy负载均衡

    配置节点解析,所有节点解析保持一致

    1. [root@k8s5 ~]# yum install -y haproxy net-tools
    2. [root@k8s5 ~]# cd /etc/haproxy/
    3. [root@k8s5 haproxy]# vim haproxy.cfg
    4. #---------------------------------------------------------------------
    5. defaults
    6. mode http
    7. log global
    8. #option httplog
    9. option dontlognull
    10. option http-server-close
    11. #option forwardfor except 127.0.0.0/8
    12. option redispatch
    13. retries 3
    14. timeout http-request 10s
    15. timeout queue 1m
    16. timeout connect 10s
    17. timeout client 1m
    18. timeout server 1m
    19. timeout http-keep-alive 10s
    20. timeout check 10s
    21. maxconn 3000
    22. listen status *:80 #监控
    23. stats uri /status
    24. stats auth admin:westos
    25. #---------------------------------------------------------------------
    26. # main frontend which proxys to the backends
    27. #---------------------------------------------------------------------
    28. frontend main *:6443
    29. mode tcp
    30. default_backend k8s
    31. #---------------------------------------------------------------------
    32. # round robin balancing between the various backends
    33. #---------------------------------------------------------------------
    34. backend k8s
    35. mode tcp
    36. balance roundrobin
    37. server app1 192.168.81.11:6443 check
    38. server app2 192.168.81.12:6443 check
    39. server app3 192.168.81.13:6443 check

    注意:需要修改为自己的k8s control-plane地址

    [root@k8s5 haproxy]# systemctl  start haproxy
    

    访问监控页面:http://192.168.81.14/status

    admin/westos

    测试成功后关闭服务,不要设置自启动

    [root@k8s5 haproxy]# systemctl  stop haproxy
    

    设置免密

    1. [root@k8s5 haproxy]# ssh-keygen
    2. [root@k8s5 haproxy]# ssh-copy-id k8s6

    k8s6节点安装haproxy软件

    [root@k8s6 ~]# yum install -y haproxy
    

    从k8s5拷贝配置文件

    [root@k8s5 haproxy]# scp haproxy.cfg  k8s6:/etc/haproxy/
    

    pacemaker高可用

    同步配置文件

    1. [root@k8s5 ~]# cd /etc/yum.repos.d/
    2. [root@k8s5 yum.repos.d]# vim dvd.repo
    3. [dvd]
    4. name=rhel7.6
    5. baseurl=file:///media
    6. gpgcheck=0
    7. [HighAvailability]
    8. name=rhel7.6 HighAvailability
    9. baseurl=file:///media/addons/HighAvailability
    10. gpgcheck=0
    11. [root@k8s5 yum.repos.d]# scp dvd.repo k8s6:/etc/yum.repos.d/

    安装软件

    1. [root@k8s5 ~]# yum install -y pacemaker pcs psmisc policycoreutils-python
    2. [root@k8s6 ~]# yum install -y pacemaker pcs psmisc policycoreutils-python

    启动pcsd服务

    1. [root@k8s5 ~]# systemctl enable --now pcsd.service
    2. [root@k8s5 ~]# ssh k8s6 systemctl enable --now pcsd.service

    设置用户密码

    1. [root@k8s5 ~]# echo westos | passwd --stdin hacluster
    2. [root@k8s5 ~]# ssh k8s6 'echo westos | passwd --stdin hacluster'

    节点认证

    1. [root@k8s5 ~]# pcs cluster auth k8s5 k8s6
    2. Username: hacluster
    3. Password: westos

    创建集群

    [root@k8s5 ~]# pcs cluster setup --name mycluster k8s5 k8s6
    

    启动集群

    [root@k8s5 ~]# pcs cluster start --all
    

    集群自启动

    [root@k8s5 ~]# pcs cluster enable --all
    

    禁用stonith

    [root@k8s5 ~]# pcs property set stonith-enabled=false
    

    添加集群资源

    1. [root@k8s5 ~]# pcs resource create vip ocf:heartbeat:IPaddr2 ip=192.168.81.200 op monitor interval=30s //创建一个 IPaddr2 类型的资源 vip,其 IP 地址为 192.168.81.200,监测间隔为 30 秒
    2. [root@k8s5 ~]# pcs resource create haproxy systemd:haproxy op monitor interval=60s //创建一个 systemd 类型的资源 haproxy,用于启动 HAProxy 服务,监测间隔为 60 秒
    3. [root@k8s5 ~]# pcs resource group add hagroup vip haproxy //将前两个资源添加到一个资源组 hagroup 中

    测试

    [root@k8s5 ~]# pcs node standby
    

    资源全部迁移到k8s6

    恢复

    [root@k8s5 ~]# pcs node  unstandby
    

    部署control-plane

    重置节点

    1. [root@k8s2 ~]# kubeadm reset
    2. [root@k8s2 ~]# cd /etc/cni/net.d
    3. [root@k8s2 net.d]# rm -rf *

    k8s3、k8s4以此类推

    加载内核模块(在所有集群节点执行)

    1. [root@k8s2 ~]# vim /etc/modules-load.d/k8s.conf
    2. overlay
    3. br_netfilter
    4. [root@k8s2 ~]# modprobe overlay
    5. [root@k8s2 ~]# modprobe br_netfilter

    1. [root@k8s2 ~]# vim /etc/sysctl.d/docker.conf
    2. net.bridge.bridge-nf-call-iptables=1
    3. net.bridge.bridge-nf-call-ip6tables=1
    4. net.ipv4.ip_forward=1
    5. [root@k8s2 ~]# sysctl --system

    确认软件版本

    [root@k8s2 ~]# rpm -q kubeadm kubelet kubectl
    

    生成初始化配置文件

    [root@k8s2 ~]# kubeadm config print init-defaults > kubeadm-init.yaml
    

    修改配置

    1. [root@k8s2 ~]# vim kubeadm-init.yaml
    2. apiVersion: kubeadm.k8s.io/v1beta3
    3. bootstrapTokens:
    4. - groups:
    5. - system:bootstrappers:kubeadm:default-node-token
    6. token: abcdef.0123456789abcdef
    7. ttl: 24h0m0s
    8. usages:
    9. - signing
    10. - authentication
    11. kind: InitConfiguration
    12. localAPIEndpoint:
    13. advertiseAddress: 192.168.81.11 #本机ip
    14. bindPort: 6443
    15. nodeRegistration:
    16. criSocket: unix:///var/run/containerd/containerd.sock
    17. imagePullPolicy: IfNotPresent
    18. name: k8s2 #本机主机名
    19. taints: null
    20. ---
    21. apiServer:
    22. timeoutForControlPlane: 4m0s
    23. apiVersion: kubeadm.k8s.io/v1beta3
    24. certificatesDir: /etc/kubernetes/pki
    25. clusterName: kubernetes
    26. controlPlaneEndpoint: "192.168.81.200:6443" #负载均衡地址
    27. controllerManager: {}
    28. dns: {}
    29. etcd:
    30. local:
    31. dataDir: /var/lib/etcd
    32. imageRepository: reg.westos.org/k8s #本地私有仓库
    33. kind: ClusterConfiguration
    34. kubernetesVersion: 1.25.0
    35. networking:
    36. dnsDomain: cluster.local
    37. serviceSubnet: 10.96.0.0/12
    38. podSubnet: 10.244.0.0/16 #pod分配地址段
    39. scheduler: {}
    40. ---
    41. apiVersion: kubeproxy.config.k8s.io/v1alpha1
    42. kind: KubeProxyConfiguration
    43. mode: ipvs #配置ipvs模式

    初始化集群

    [root@k8s2 ~]# kubeadm init   --config kubeadm-init.yaml --upload-certs

    部署网络插件

    [root@k8s2 calico]# kubectl apply -f calico.yaml
    

    添加其它control-plane节点

    1. [root@k8s3 containerd]# kubeadm join 192.168.81.200:6443 --token abcdef.0123456789abcdef \
    2. --discovery-token-ca-cert-hash sha256:1142c506bc44c57b7c38487a5f348b73f1eb6a19a28ab44efde287d811ad1bc2 \
    3. --control-plane --certificate-key b8feb02bce9fa4fea5676265438ec505fcc2f14501584cc938ed08684bc8c8a7
    4. [root@k8s4 containerd]# kubeadm join 192.168.81.200:6443 --token abcdef.0123456789abcdef \
    5. --discovery-token-ca-cert-hash sha256:1142c506bc44c57b7c38487a5f348b73f1eb6a19a28ab44efde287d811ad1bc2 \
    6. --control-plane --certificate-key b8feb02bce9fa4fea5676265438ec505fcc2f14501584cc938ed08684bc8c8a7

    指定 Kubernetes 的配置文件路径为 /etc/kubernetes/admin.conf 

    export KUBECONFIG=/etc/kubernetes/admin.conf

    部署worker node

    新添加的节点需要初始化配置

    1. 禁用selinux、firewalld、swap分区
    2. 部署containerd
    3. 安装kubelet、kubeadm、kubectl
    4. 配置内核模块

    禁用swap

    1. [root@k8s7 ~]# swapoff -a
    2. [root@k8s7 ~]# vim /etc/fstab

    安装containerd、kubelet、kubeadm、kubectl

    从其它节点拷贝repo文件

    [root@k8s4 yum.repos.d]# scp k8s.repo docker.repo  k8s7:/etc/yum.repos.d/
    

    安装软件(注意:此处安装的 1.24.17 版本应与集群配置中的 kubernetesVersion 保持一致,请按实际集群版本调整)

    [root@k8s7 yum.repos.d]# yum install -y containerd.io kubeadm-1.24.17-0 kubelet-1.24.17-0 kubectl-1.24.17-0
    

    自启动服务

    1. [root@k8s7 ~]# systemctl enable --now containerd
    2. [root@k8s7 ~]# systemctl enable --now kubelet

    拷贝containerd的配置文件

    [root@k8s4 containerd]# scp -r * k8s7:/etc/containerd/
    

    重启服务

    1. [root@k8s7 containerd]# systemctl restart containerd
    2. [root@k8s7 containerd]# crictl config runtime-endpoint unix:///run/containerd/containerd.sock
    3. [root@k8s7 containerd]# crictl pull myapp:v1

    配置内核模块

    1. [root@k8s4 containerd]# cd /etc/modules-load.d/
    2. [root@k8s4 modules-load.d]# scp k8s.conf k8s7:/etc/modules-load.d/
    3. [root@k8s4 modules-load.d]# cd /etc/sysctl.d/
    4. [root@k8s4 sysctl.d]# scp docker.conf k8s7:/etc/sysctl.d/
    5. [root@k8s7 ~]# modprobe overlay
    6. [root@k8s7 ~]# modprobe br_netfilter
    7. [root@k8s7 ~]# sysctl --system
    1. [root@k8s7 ~]# kubeadm join 192.168.81.200:6443 --token abcdef.0123456789abcdef \
    2. --discovery-token-ca-cert-hash sha256:1142c506bc44c57b7c38487a5f348b73f1eb6a19a28ab44efde287d811ad1bc2

    添加worker标签

    [root@k8s2 ~]# kubectl label node k8s7 node-role.kubernetes.io/worker=

     

  • 相关阅读:
    元数据管理系统
    好用的在线思维导图软件--GitMind
    AC自动机
    【软件工程导论】1.软件过程模型
    【Tailwind CSS】当页面内容过少,怎样让footer保持在屏幕底部?
    FPGA学习笔记(九)SPI学习总结及stm32的HAL库下SPI配置
    40% 的云原生开发者专注于微服务领域
    设计模式之解释器模式
    Linux 安全 - Capabilities机制
    大学生抗疫逆行者网页作业 感动人物HTML网页代码成品 最美逆行者dreamweaver网页模板 致敬疫情感动人物网页设计制作
  • 原文地址:https://blog.csdn.net/m0_64028800/article/details/134468743