Kubernetes 1.25 Cluster Setup


    Role                                         IP                System
    k8s-master01, Keepalived (MASTER) + Nginx    172.16.3.225/21   Rocky Linux release 8.6 (Green Obsidian)
    k8s-master02, Keepalived (BACKUP) + Nginx    172.16.3.226/21   Rocky Linux release 8.6 (Green Obsidian)
    k8s-master03, Keepalived (BACKUP) + Nginx    172.16.3.227/21   Rocky Linux release 8.6 (Green Obsidian)
    k8s-node01                                   172.16.4.184/21   Rocky Linux release 8.6 (Green Obsidian)
    VIP                                          172.16.3.254/21

    I. Preparation:

    1. Switch the default repos to the Aliyun mirror [optional]

    	1. Repoint the system default repos (BaseOS etc.)
    sed -e 's|^mirrorlist=|#mirrorlist=|g' \
        -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
        -i.bak \
        /etc/yum.repos.d/Rocky-*.repo
    
    	2. Configure the EPEL repo
    1) Install the epel-release package
    dnf install -y https://mirrors.aliyun.com/epel/epel-release-latest-8.noarch.rpm
    2) Replace the repo URLs with the Aliyun mirror
    sed -i 's|^#baseurl=https://download.example/pub|baseurl=https://mirrors.aliyun.com|' /etc/yum.repos.d/epel*
    sed -i 's|^metalink|#metalink|' /etc/yum.repos.d/epel*
    
    dnf makecache
    

    2. Time synchronization

    (1) Install the chrony service
    dnf install chrony -y
    
    (2) Edit the chrony configuration file
    vi /etc/chrony.conf 
    ...
    # Comment out the default upstream servers and use domestic (Aliyun) NTP servers instead
    server ntp.aliyun.com iburst
    server ntp1.aliyun.com iburst
    server ntp2.aliyun.com iburst
    server ntp3.aliyun.com iburst
    server ntp4.aliyun.com iburst
    server ntp5.aliyun.com iburst
    ...
    
    (3) Enable chronyd at boot
    systemctl enable --now chronyd
    systemctl restart chronyd
    
    (4) Check the service
    systemctl status chronyd
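    
    To confirm time is actually syncing (not just that the daemon is running), chrony can report its sources and tracking state:
    chronyc sources -v
    chronyc tracking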
    

    3. Install dependency packages

    dnf install ipvsadm ipset tar bash-completion yum-utils device-mapper-persistent-data lvm2 iproute-tc -y
    

    4. Set hostnames and configure /etc/hosts

    On 172.16.3.225: hostnamectl set-hostname k8s-master01
    On 172.16.3.226: hostnamectl set-hostname k8s-master02
    On 172.16.3.227: hostnamectl set-hostname k8s-master03
    On 172.16.4.184: hostnamectl set-hostname k8s-node01
    
    cat >> /etc/hosts << 'EOF'
    172.16.3.225 k8s-master01
    172.16.3.226 k8s-master02
    172.16.3.227 k8s-master03
    172.16.4.184 k8s-node01
    172.16.3.254 vip.apiserver
    EOF
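    
    A quick loop (a minimal check) verifies that every hosts entry resolves and answers:
    for h in k8s-master01 k8s-master02 k8s-master03 k8s-node01; do
        ping -c1 -W1 $h >/dev/null && echo "$h OK" || echo "$h unreachable"
    done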
    

    5. Disable the firewall, SELinux, and swap

    systemctl disable firewalld --now
    setenforce 0    # turn SELinux off for the current session; the sed below makes it permanent
    sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
    swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
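    
    Quick verification that all three are off (SELinux reports Disabled only after the reboot in the next step):
    getenforce                        # expect Permissive now, Disabled after reboot
    swapon --show                     # expect no output
    systemctl is-active firewalld     # expect inactive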
    

    6. Kernel upgrade

    # Check the current kernel version
    uname -sr
    
    # 0. Update all packages
    dnf update -y && dnf upgrade -y
    
    # 1. Import the ELRepo GPG key and install the repo package
    rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
    dnf install https://www.elrepo.org/elrepo-release-8.el8.elrepo.noarch.rpm -y
    
    # 2. With the elrepo-kernel repo enabled, list the available kernel packages:
    dnf --disablerepo="*" --enablerepo="elrepo-kernel" list available 
    
    kernel-lt: long-term support branch
    kernel-ml: mainline stable branch
    
    # 3. Pick the branch you want; here we install the latest mainline
    dnf --enablerepo=elrepo-kernel install kernel-ml kernel-ml-devel kernel-ml-headers -y
    
    # 4. Reboot
    shutdown -r now
    
    # 5. Confirm the new kernel version
    uname -sr
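    
    # On Rocky 8 the new kernel normally becomes the default boot entry automatically.
    # If the old kernel still boots, check and set the default explicitly (the path below is an example):
    grubby --default-kernel
    # grubby --set-default /boot/vmlinuz-<new-version>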
    

    7. Enable the IPVS modules and tune the kernel

    cat > /etc/modules-load.d/ipvs.conf << 'EOF'
    ip_vs
    ip_vs_rr
    ip_vs_wrr
    ip_vs_sh
    nf_conntrack
    br_netfilter
    overlay
    EOF
    # load the modules now without waiting for a reboot
    systemctl restart systemd-modules-load.service
    
    cat >> /etc/security/limits.conf << 'EOF'
    * soft nproc 65535
    * hard nproc 130070
    * soft nofile 65535
    * hard nofile 130070
    * soft memlock unlimited
    * hard memlock unlimited
    EOF
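    
    # Before applying the sysctl settings below, confirm the modules are loaded;
    # the net.bridge.* keys fail to apply while br_netfilter is absent:
    lsmod | grep -e ip_vs -e nf_conntrack -e br_netfilter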
    
    
    https://developer.aliyun.com/article/718976		# Aliyun kernel tuning recommendations
    cat >> /etc/sysctl.conf << 'EOF'
    fs.file-max = 2097152
    # Reduce swap usage; the default is 60, 0 disables swapping
    vm.swappiness = 0
    # 1 lets the kernel overcommit all physical memory; commonly used for Redis
    vm.overcommit_memory = 1
    
    fs.inotify.max_user_instances = 524288
    fs.inotify.max_user_watches = 524288
    
    net.bridge.bridge-nf-call-iptables = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    
    net.nf_conntrack_max = 2097152
    net.netfilter.nf_conntrack_tcp_timeout_established = 300
    net.netfilter.nf_conntrack_max = 1048576
    net.netfilter.nf_conntrack_tcp_timeout_close_wait = 60
    net.netfilter.nf_conntrack_tcp_timeout_fin_wait = 120
    net.netfilter.nf_conntrack_tcp_timeout_time_wait = 120
    net.netfilter.nf_conntrack_buckets = 655360
    net.ipv4.conf.default.forwarding = 1
    
    # Enable IP routing/forwarding
    net.ipv4.ip_forward = 1
    net.ipv4.neigh.default.gc_thresh1 = 1024
    net.ipv4.neigh.default.gc_thresh2 = 4096
    net.ipv4.neigh.default.gc_thresh3 = 8192
    # Do not cache per-route TCP metrics
    net.ipv4.tcp_no_metrics_save = 1 
    # Set to 1 to mitigate SYN flood attacks
    net.ipv4.tcp_syncookies = 1 
    # Max number of TIME_WAIT sockets; neither too large nor too small; essential behind an nginx reverse proxy
    net.ipv4.tcp_max_tw_buckets = 50000
    # Enable the SACK option
    net.ipv4.tcp_sack = 1
    # Enable window scaling so transfers beyond 64 KB windows are possible
    net.ipv4.tcp_window_scaling = 1 
    # TCP buffer memory (in pages); tune carefully at very high connection counts
    net.ipv4.tcp_mem = 786432 2097152 3145728  
    net.ipv4.tcp_rmem = 4096 4096 16777216
    net.ipv4.tcp_wmem = 4096 4096 16777216 
    # Max orphaned sockets (owned by no process); keep modest to limit attack surface
    net.ipv4.tcp_max_orphans = 65535 
    # SYN backlog queue length; keep moderate, oversized queues gain nothing
    net.ipv4.tcp_max_syn_backlog = 65535 
    # Disable TCP timestamps for high concurrency; note tcp_tw_reuse below only takes effect with timestamps enabled
    net.ipv4.tcp_timestamps = 0 
    # SYN+ACK retry count; keep at 5 or below
    net.ipv4.tcp_synack_retries = 1
    # SYN retry count; keep at 5 or below
    net.ipv4.tcp_syn_retries = 1 
    # Timeout for releasing a connection after the local side closes first (FIN-WAIT-2); important, keep < 30
    net.ipv4.tcp_fin_timeout = 5
    # Idle time before TCP keepalive probes start; no need for a long interval
    net.ipv4.tcp_keepalive_time = 30
    # Local port range (start, end) for outbound connections; end-start bounds concurrent outbound connections
    net.ipv4.ip_local_port_range = 10240 65535
    # Reuse TIME_WAIT sockets for outbound connections; important (requires tcp_timestamps = 1 to work)
    net.ipv4.tcp_tw_reuse = 1
    
    # Disable the sysrq key
    kernel.sysrq = 0 
    # Append the PID to core dump file names
    kernel.core_uses_pid = 1 
    # Max message size in a message queue; default 8 KB, 64 KB recommended
    kernel.msgmax = 65536
    # Total bytes a message queue may hold
    kernel.msgmnb = 163840 
    
    # Default and max socket buffer sizes
    net.core.wmem_default = 8388608
    net.core.rmem_default = 8388608
    net.core.rmem_max = 16777216
    net.core.wmem_max = 16777216 
    # Per-device input packet queue length; keep moderate, oversized backlogs gain nothing
    net.core.netdev_max_backlog = 65535 
    # Max accept queue length (listen backlog cap); important
    net.core.somaxconn = 65535 
    EOF
    sysctl -p
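    
    # Spot-check the values Kubernetes depends on most:
    sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.core.somaxconn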
    
    cat >> /etc/systemd/system.conf << 'EOF'
    DefaultLimitNOFILE=1024000
    DefaultLimitNPROC=1024000
    EOF
    

    II. Deployment steps:

    1. Install Keepalived + Nginx

    • Install on all three master nodes
     dnf install epel-release -y
     dnf install nginx keepalived -y
     # If the stream module is packaged separately (nginx-mod-stream on Rocky/EPEL),
     # install it too, or the stream{} block below will fail to load:
     dnf install nginx-mod-stream -y
    

    1.1. Nginx configuration file (identical on master and backups)

    cp /etc/nginx/nginx.conf{,.bak}
    cat > /etc/nginx/nginx.conf << 'EOF'
    user nginx;
    worker_processes auto;
    pid /var/run/nginx.pid;
    
    include /usr/share/nginx/modules/*.conf;
    
    worker_rlimit_nofile 1024;
    events {
        use epoll;
        worker_connections 1024;
    }
    
    http {
        log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                          '$status $body_bytes_sent "$http_referer" '
                          '"$http_user_agent" "$http_x_forwarded_for"';
    
        sendfile            on;
        tcp_nopush          on;
        tcp_nodelay         on;
        keepalive_timeout   65;
        types_hash_max_size 2048;
    
        include             /etc/nginx/mime.types;
        default_type        application/octet-stream;
    }
    
    # Layer-4 load balancing for the kube-apiserver instances on the three masters
    stream {
    
        log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    
        access_log  /var/log/nginx/k8s-access.log  main;
        error_log   /var/log/nginx/k8s-error.log   error;
    	
        upstream k8s-apiserver {
           server 172.16.3.225:6443;   # Master1 APISERVER IP:PORT
           server 172.16.3.226:6443;   # Master2 APISERVER IP:PORT
           server 172.16.3.227:6443;   # Master3 APISERVER IP:PORT
        }
    
        server {
       listen 16443;  # nginx runs on the same hosts as the apiserver, so it cannot listen on 6443 or the two would conflict
           proxy_pass k8s-apiserver;
        }
    }
    EOF
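    
    Validate the configuration before relying on it; an "unknown directive stream" error means the stream module package mentioned above is missing:
    nginx -t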
    

    1.2. Keepalived configuration (Nginx MASTER)

    • Configuration for 172.16.3.225
    cp /etc/keepalived/keepalived.conf{,.bak}
    cat > /etc/keepalived/keepalived.conf << 'EOF'
    global_defs { 
       notification_email { 
         acassen@firewall.loc 
         failover@firewall.loc 
         sysadmin@firewall.loc 
       } 
       notification_email_from Alexandre.Cassen@firewall.loc  
       smtp_server 127.0.0.1 
       smtp_connect_timeout 30 
       router_id NGINX_MASTER
    } 
    
    vrrp_script check_nginx {
        script "/etc/keepalived/check_nginx.sh"
        interval 2   # run the check script every 2 seconds
    }
    
    vrrp_instance VI_1 { 
        state MASTER 
        interface ens192  # change to the actual NIC name
        virtual_router_id 51 # VRRP router ID; unique per VRRP instance
        priority 100    # priority; set lower than 100 on the backups
        advert_int 1    # VRRP advertisement (heartbeat) interval, default 1s 
        authentication { 
            auth_type PASS      
            auth_pass 1111 
        }  
        # virtual IP (VIP)
        virtual_ipaddress { 
            172.16.3.254/21
        } 
        track_script {
            check_nginx
        } 
    }
    EOF
    
    • vrrp_script: the script that checks nginx's health (its result decides whether to fail over)
    • virtual_ipaddress: the virtual IP (VIP)

    Create the nginx health-check script referenced in the configuration above:

    cat > /etc/keepalived/check_nginx.sh  << 'EOF'
    #!/bin/bash
    count=$(ss -antp |grep 16443 |egrep -cv "grep|$$")
    
    if [ "$count" -eq 0 ];then
    #    exit 1
        systemctl stop keepalived.service
    else
        exit 0
    fi
    EOF
    chmod +x /etc/keepalived/check_nginx.sh
    

    1.3. Keepalived configuration (Nginx BACKUP)

    • Configuration for 172.16.3.226
    cat > /etc/keepalived/keepalived.conf << EOF
    global_defs { 
       notification_email { 
         acassen@firewall.loc 
         failover@firewall.loc 
         sysadmin@firewall.loc 
       } 
       notification_email_from Alexandre.Cassen@firewall.loc  
       smtp_server 127.0.0.1 
       smtp_connect_timeout 30 
       router_id NGINX_BACKUP
    } 
    
    vrrp_script check_nginx {
        script "/etc/keepalived/check_nginx.sh"
        interval 2   # run the check script every 2 seconds
    }
    
    vrrp_instance VI_1 { 
        state BACKUP 
        interface ens192
        virtual_router_id 51 # VRRP router ID; unique per VRRP instance 
        priority 90
        advert_int 1
        authentication { 
            auth_type PASS      
            auth_pass 1111 
        }  
        virtual_ipaddress { 
            172.16.3.254/21
        } 
        track_script {
            check_nginx
        } 
    }
    EOF
    

    Create the nginx health-check script referenced in the configuration above:

    cat > /etc/keepalived/check_nginx.sh  << "EOF"
    #!/bin/bash
    count=$(ss -antp |grep 16443 |egrep -cv "grep|$$")
    
    if [ "$count" -eq 0 ];then
    #    exit 1
        systemctl stop keepalived.service
    else
        exit 0
    fi
    EOF
    chmod +x /etc/keepalived/check_nginx.sh
    

    Note: keepalived decides failover from the script's exit code (0 = healthy, non-zero = unhealthy); here the script goes one step further and stops keepalived itself, which releases the VIP to a backup.

    1.4. Keepalived configuration (second Nginx BACKUP)

    • Configuration for 172.16.3.227
    cat > /etc/keepalived/keepalived.conf << EOF
    global_defs { 
       notification_email { 
         acassen@firewall.loc 
         failover@firewall.loc 
         sysadmin@firewall.loc 
       } 
       notification_email_from Alexandre.Cassen@firewall.loc  
       smtp_server 127.0.0.1 
       smtp_connect_timeout 30 
       router_id NGINX_BACKUP
    } 
    
    vrrp_script check_nginx {
        script "/etc/keepalived/check_nginx.sh"
        interval 2   # run the check script every 2 seconds
    }
    
    vrrp_instance VI_1 { 
        state BACKUP 
        interface ens192
        virtual_router_id 51 # VRRP router ID; unique per VRRP instance 
        priority 80
        advert_int 1
        authentication { 
            auth_type PASS      
            auth_pass 1111 
        }  
        virtual_ipaddress { 
            172.16.3.254/21
        } 
        track_script {
            check_nginx
        } 
    }
    EOF
    

    Create the nginx health-check script referenced in the configuration above:

    cat > /etc/keepalived/check_nginx.sh  << "EOF"
    #!/bin/bash
    count=$(ss -antp |grep 16443 |egrep -cv "grep|$$")
    
    if [ "$count" -eq 0 ];then
        systemctl stop keepalived.service
    else
        exit 0
    fi
    EOF
    chmod +x /etc/keepalived/check_nginx.sh
    

    Note: keepalived decides failover from the script's exit code (0 = healthy, non-zero = unhealthy).

    1.5. Start the services and enable them at boot

    systemctl daemon-reload
    systemctl enable nginx --now
    systemctl enable keepalived --now
    
    systemctl status nginx
    systemctl status keepalived
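    
    Confirm the VIP sits on the MASTER node, then rehearse a failover (a minimal drill, assuming the ens192 interface configured above):
    ip addr show ens192 | grep 172.16.3.254   # should match on k8s-master01 only
    
    # On k8s-master01: stopping nginx makes check_nginx.sh stop keepalived,
    # which releases the VIP to the highest-priority backup.
    systemctl stop nginx
    ip addr show ens192 | grep 172.16.3.254   # gone here, now on k8s-master02
    systemctl start nginx keepalived          # recover afterwards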
    

    2. Install containerd

    • Install on all hosts; the Tsinghua mirror is used here
    yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
    sudo sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
    dnf install containerd.io -y
    

    2.1. Configure containerd

    containerd config default > /etc/containerd/config.toml	  # dump containerd's default configuration
    sed -i "s#/var/lib/containerd#/data/containerd#" /etc/containerd/config.toml
    sed -i "s#/run/containerd#/data/containerd/state#" /etc/containerd/config.toml
    sed -i "s#/data/containerd/state/containerd.sock#/run/containerd/containerd.sock#" /etc/containerd/config.toml
    sed -i "s#/opt/containerd#/data/containerd/containers#" /etc/containerd/config.toml
    sed -i 's#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/google_containers#' /etc/containerd/config.toml
    sed -i 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml
    
    cat > /etc/crictl.yaml << 'EOF'
    runtime-endpoint: unix:///run/containerd/containerd.sock
    image-endpoint: unix:///run/containerd/containerd.sock
    timeout: 10    # request timeout in seconds
    debug: false   # whether to emit debug output
    EOF
    mkdir -p /data
    systemctl enable containerd --now
    systemctl status containerd
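    
    # Verify the runtime answers over the CRI socket and the relocated paths took effect:
    crictl version
    containerd config dump | grep -E '^(root|state)'   # should show the /data/containerd paths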
    

    3. Install Kubernetes

    • Run on all hosts; the Tsinghua mirror is used here
    cat > /etc/yum.repos.d/kubernetes.repo << 'EOF'
    [kubernetes]
    name=kubernetes
    baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-$basearch
    enabled=1
    EOF
    dnf install -y --nogpgcheck kubelet kubeadm kubectl
    systemctl enable kubelet --now
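    
    # The repo above carries no version pinning, so the latest packages are installed.
    # To reproduce this walkthrough exactly, pin the version instead (package naming
    # follows the usual kubeadm repo conventions):
    # dnf install -y --nogpgcheck kubelet-1.25.2 kubeadm-1.25.2 kubectl-1.25.2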
    

    3.1. Prepare and run the cluster initialization

    • Run on 172.16.3.225
    kubeadm config print init-defaults > kubeadm-init.yaml
    cat > kubeadm-init.yaml << 'EOF'
    apiVersion: kubeadm.k8s.io/v1beta3
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 172.16.3.225    # change to this node's IP
      bindPort: 6443
    nodeRegistration:
      criSocket: unix:///var/run/containerd/containerd.sock
      imagePullPolicy: IfNotPresent
      name: k8s-master01    # change to this node's hostname
      taints: null
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta3
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: 172.16.3.254:16443    # added: your VIP and the load-balancer port
    controllerManager: {}
    dns: {}
    etcd:
      local:
        dataDir: /var/lib/etcd   # etcd data directory
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers    # use the Aliyun image mirror
    kind: ClusterConfiguration
    kubernetesVersion: 1.25.2    # change to your Kubernetes version
    networking:
      dnsDomain: cluster.local
      podSubnet: 10.244.0.0/16    # added: the pod CIDR
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
    # Added: switch kube-proxy to IPVS mode (the default is iptables)
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    EOF
    
    kubeadm config images pull --config=kubeadm-init.yaml			# pre-pull the images
    kubeadm init --config kubeadm-init.yaml | tee kubeadm-init.log		# initialize the cluster
    
    • Sample output from the initialization:
    [init] Using Kubernetes version: v1.25.2
    [preflight] Running pre-flight checks
    [preflight] Pulling images required for setting up a Kubernetes cluster
    [preflight] This might take a minute or two, depending on the speed of your internet connection
    [preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
    [certs] Using certificateDir folder "/etc/kubernetes/pki"
    [certs] Generating "ca" certificate and key
    [certs] Generating "apiserver" certificate and key
    [certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.16.3.225 172.16.3.254]
    [certs] Generating "apiserver-kubelet-client" certificate and key
    [certs] Generating "front-proxy-ca" certificate and key
    [certs] Generating "front-proxy-client" certificate and key
    [certs] Generating "etcd/ca" certificate and key
    [certs] Generating "etcd/server" certificate and key
    [certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [172.16.3.225 127.0.0.1 ::1]
    [certs] Generating "etcd/peer" certificate and key
    [certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [172.16.3.225 127.0.0.1 ::1]
    [certs] Generating "etcd/healthcheck-client" certificate and key
    [certs] Generating "apiserver-etcd-client" certificate and key
    [certs] Generating "sa" key and public key
    [kubeconfig] Using kubeconfig folder "/etc/kubernetes"
    W0929 15:46:26.363353   15180 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [kubeconfig] Writing "admin.conf" kubeconfig file
    W0929 15:46:26.523144   15180 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [kubeconfig] Writing "kubelet.conf" kubeconfig file
    W0929 15:46:26.745022   15180 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [kubeconfig] Writing "controller-manager.conf" kubeconfig file
    W0929 15:46:27.021676   15180 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [kubeconfig] Writing "scheduler.conf" kubeconfig file
    [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
    [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
    [kubelet-start] Starting the kubelet
    [control-plane] Using manifest folder "/etc/kubernetes/manifests"
    [control-plane] Creating static Pod manifest for "kube-apiserver"
    [control-plane] Creating static Pod manifest for "kube-controller-manager"
    [control-plane] Creating static Pod manifest for "kube-scheduler"
    [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
    [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
    [apiclient] All control plane components are healthy after 11.528231 seconds
    [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
    [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
    [upload-certs] Skipping phase. Please see --upload-certs
    [mark-control-plane] Marking the node k8s-master01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
    [mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
    [bootstrap-token] Using token: abcdef.0123456789abcdef
    [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
    [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
    [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
    [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
    [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
    [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
    [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
    W0929 15:46:42.544999   15180 endpoint.go:57] [endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
    [addons] Applied essential addon: CoreDNS
    [addons] Applied essential addon: kube-proxy
    
    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Alternatively, if you are the root user, you can run:
    
      export KUBECONFIG=/etc/kubernetes/admin.conf
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    You can now join any number of control-plane nodes by copying certificate authorities
    and service account keys on each node and then running the following as root:
    
      kubeadm join 172.16.3.254:16443 --token abcdef.0123456789abcdef \
    	--discovery-token-ca-cert-hash sha256:872c34cc291025d258f6dc10de3f1789086f2d82a2a2156806b16ded736e62de \
    	--control-plane 
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 172.16.3.254:16443 --token abcdef.0123456789abcdef \
    	--discovery-token-ca-cert-hash sha256:419e25aa5089819dc6cdae3996b92e950e28504ecdd33bdca230ec5379731764 
    	
    
    Run the following on all masters:
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    To join a node as an additional master (control plane), run:
    kubeadm join 172.16.3.254:16443 --token abcdef.0123456789abcdef \
    	--discovery-token-ca-cert-hash sha256:872c34cc291025d258f6dc10de3f1789086f2d82a2a2156806b16ded736e62de \
    	--control-plane 
    
    To join a node as a worker, run:
    kubeadm join 172.16.3.254:16443 --token abcdef.0123456789abcdef \
    	--discovery-token-ca-cert-hash sha256:419e25aa5089819dc6cdae3996b92e950e28504ecdd33bdca230ec5379731764 
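    
    The bootstrap token expires after the 24h ttl set in the init config. For later joins, generate a fresh join command; for additional masters, kubeadm can upload the certificates itself instead of the manual copy shown in 3.2 below:
    kubeadm token create --print-join-command        # prints a fresh worker join command
    kubeadm init phase upload-certs --upload-certs   # prints a certificate key
    # then append --control-plane --certificate-key <key> to the join command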
    

    3.2. If joining the cluster fails, follow these steps

    Error message:
    [failure loading certificate for CA: couldn't load the certificate file /etc/kubernetes/pki/ca.crt: open /etc/kubernetes/pki/ca.crt: no such file or directory, failure loading key for service account: couldn't load the private key file /etc/kubernetes/pki/sa.key: open /etc/kubernetes/pki/sa.key: no such file or directory, failure loading certificate for front-proxy CA: couldn't load the certificate file /etc/kubernetes/pki/front-proxy-ca.crt: open /etc/kubernetes/pki/front-proxy-ca.crt: no such file or directory, failure loading certificate for etcd CA: couldn't load the certificate file /etc/kubernetes/pki/etcd/ca.crt: open /etc/kubernetes/pki/etcd/ca.crt: no such file or directory]
    
    cd /etc/kubernetes && tar -zcf k8s-key.tar.gz admin.conf pki/ca.* pki/sa.* pki/front-proxy-ca.* pki/etcd/ca.*
    
    scp /etc/kubernetes/k8s-key.tar.gz k8s-master02:/etc/kubernetes
    scp /etc/kubernetes/k8s-key.tar.gz k8s-master03:/etc/kubernetes
    
    On the other two masters, extract the certificate archive into place, then retry the join:
    cd /etc/kubernetes && tar -zxf k8s-key.tar.gz
    

    4. Install the Calico network plugin

    • Run on master01
    kubectl get no	# nodes show NotReady because the network plugin is not ready yet
    NAME           STATUS     ROLES           AGE     VERSION
    k8s-master01   NotReady   control-plane   5m54s   v1.25.2
    k8s-master02   NotReady   control-plane   4m13s   v1.25.2
    k8s-master03   NotReady   control-plane   4m14s   v1.25.2
    k8s-node01     NotReady   <none>          2m40s   v1.25.2
    
    wget https://projectcalico.docs.tigera.io/manifests/calico.yaml
    Edit CALICO_IPV4POOL_CIDR to match the pod CIDR set during kubeadm init; mind the YAML indentation:
    4551             - name: CALICO_IPV4POOL_CIDR
    4552               value: "10.244.0.0/16"
    kubectl apply -f calico.yaml
    
    kubectl get po -A	# wait patiently until every pod is Running
    kubectl get no
    NAME           STATUS   ROLES           AGE   VERSION
    k8s-master01   Ready    control-plane   13m   v1.25.2
    k8s-master02   Ready    control-plane   12m   v1.25.2
    k8s-master03   Ready    control-plane   12m   v1.25.2
    k8s-node01     Ready    <none>          10m   v1.25.2
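    
    With the network plugin up, a DNS lookup from a throwaway pod exercises pod networking, kube-proxy (IPVS), and CoreDNS end to end (busybox:1.28 is used because nslookup is broken in newer busybox images):
    kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default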
    

    5. Configure kubectl tab completion

    cat >> ~/.bashrc << 'EOF'
    source /usr/share/bash-completion/bash_completion
    source <(kubectl completion bash)
    EOF
    source ~/.bashrc
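    
    Optionally wire the completion to a short alias as well:
    cat >> ~/.bashrc << 'EOF'
    alias k=kubectl
    complete -o default -F __start_kubectl k
    EOF
    source ~/.bashrc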
    

    III. Verify that Kubernetes works

    cat > myapp.yaml << 'EOF'
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name:  myapp
      namespace: default
    spec:
      selector:
        matchLabels:
          app: myapp
      replicas: 5
      template:
        metadata:
          labels:
            app:  myapp
        spec:
          containers:
          - name:  myapp
            image:  hetaotao/myapp:v1
            ports:
            - containerPort:  80
              name:  myapp
            volumeMounts:
            - name: localtime
              mountPath: /etc/localtime
          volumes:
            - name: localtime
              hostPath:
                path: /usr/share/zoneinfo/Asia/Shanghai
          restartPolicy: Always
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: myapp
      namespace: default
    spec:
      selector:
        app: myapp
      type: NodePort
      ports:
      - name: myapp
        protocol: TCP
        port: 80
        targetPort: 80
    EOF
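    
    Apply the manifest, then curl the NodePort assigned to the Service (the port is allocated dynamically; read it from the svc output):
    kubectl apply -f myapp.yaml
    kubectl get po,svc -l app=myapp     # note the port mapping, e.g. 80:3xxxx/TCP
    # curl http://<any-node-ip>:<nodePort>/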
    

    IV. Further topics:

    1. Deploy the Ingress controller

    wget https://ghproxy.com/https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/baremetal/deploy.yaml
    
    grep "image:" deploy.yaml 	# 将Yaml文件的镜像修改地址成国内的
        image: lank8s.cn/ingress-nginx/controller:v1.3.1
        image: lank8s.cn/ingress-nginx/kube-webhook-certgen:v1.3.0
        image: lank8s.cn/ingress-nginx/kube-webhook-certgen:v1.3.0
    
    kubectl get po,svc -n ingress-nginx 
    NAME                                            READY   STATUS      RESTARTS   AGE
    pod/ingress-nginx-admission-create-z844l        0/1     Completed   0          73s
    pod/ingress-nginx-admission-patch-2x2sq         0/1     Completed   0          73s
    pod/ingress-nginx-controller-554f784744-g7zp6   1/1     Running     0          73s
    
    NAME                                         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
    service/ingress-nginx-controller             NodePort    10.104.115.74   <none>        80:31743/TCP,443:32008/TCP   73s
    service/ingress-nginx-controller-admission   ClusterIP   10.97.58.184    <none>        443/TCP                      73s
    

    2. Deploy a test Pod to verify networking and Ingress

    cat > nginx.yaml << 'EOF'
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name:  nginx
      namespace: default
    spec:
      selector:
        matchLabels:
          app: nginx
      replicas: 2
      template:
        metadata:
          labels:
            app:  nginx
        spec:
          containers:
          - name:  nginx
            image:  nginx:latest
            ports:
            - containerPort:  80
              name:  nginx
            volumeMounts:
            - name: localtime
              mountPath: /etc/localtime
          volumes:
            - name: localtime
              hostPath:
                path: /usr/share/zoneinfo/Asia/Shanghai
          restartPolicy: Always
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
      namespace: default
    spec:
      selector:
        app: nginx
      type: ClusterIP
      ports:
      - name: nginx
        protocol: TCP
        port: 80
        targetPort: 80
    ---
    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: nginx
      namespace: default
    spec:
      ingressClassName: nginx
      rules:
      - host: foo.bar.com
        http:
          paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: nginx
                port:
                  number: 80
    EOF
    
    • Apply the manifest and check that the nginx Pod, Service, and Ingress are up
    kubectl apply -f nginx.yaml
    kubectl get po,svc,ingress
    NAME                         READY   STATUS    RESTARTS   AGE
    pod/nginx-8456669dfb-678d9   1/1     Running   0          67s
    
    NAME                 TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
    service/kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP   28m
    service/nginx        ClusterIP   10.106.188.230   <none>        80/TCP    67s
    
    NAME                              CLASS   HOSTS         ADDRESS   PORTS   AGE
    ingress.networking.k8s.io/nginx   nginx   foo.bar.com             80      67s
    
    # foo.bar.com must resolve to a node IP first, e.g. add to /etc/hosts:
    # 172.16.4.184 foo.bar.com
    curl -L http://foo.bar.com:31743/
    

    3. Kubelet: reserving resources

    [root@k8s-master ~]# ps -ef | grep kubelet
    root     2035781       1  4 12:59 ?        00:01:45 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --network-plugin=cni --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.4.1
    
    [root@k8s-master ~]# vim /var/lib/kubelet/kubeadm-flags.env 
    KUBELET_KUBEADM_ARGS="--network-plugin=cni --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.4.1 --kube-reserved=cpu=1,memory=256Mi --system-reserved=cpu=1,memory=256Mi --max-pods=600"
    
    [root@k8s-master ~]# systemctl daemon-reload && systemctl restart kubelet
    [root@k8s-master ~]# ps -ef | grep kubelet
    root     2035781       1  4 12:59 ?        00:01:47 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --network-plugin=cni --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.4.1 --kube-reserved=cpu=1,memory=256Mi --system-reserved=cpu=1,memory=256Mi --max-pods=600
    

    --kube-reserved=cpu=1,memory=256Mi # reserve CPU and memory for Kubernetes system components.
    
    --system-reserved=cpu=1,memory=256Mi # reserve CPU and memory for OS system processes.
    
    --max-pods=600 # maximum number of Pods allowed on the node; the default is 110.
    
    See kubelet --help for the full list of flags.
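    The reservations show up as the difference between Capacity and Allocatable on the node:
    
    kubectl describe node k8s-node01 | grep -A 6 -E '^(Capacity|Allocatable)'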
