Kubernetes v1.25: Building a Single-Node Cluster for Debugging the K8S Source Code


    Reference

    Adapted from v1.25.0-CentOS-binary-install-IPv6-IPv4-Three-Masters-Two-Slaves.md, with changes based on my own understanding.

    The finished single-node v1.25.4 cluster:

    (screenshots of the running cluster omitted)

    1. Preparing the Cluster Environment

    1.1. Host Plan

    IP            | Hostname    | Roles          | OS         | Installed Components
    192.168.11.71 | k8s-master1 | master, worker | CentOS 7.9 | api-server, controller-manager, scheduler, etcd, kubectl, kubelet, kube-proxy, containerd, runc

    1.2. Software Versions

    Software   | Version
    kubernetes | v1.25.4
    etcd       | v3.5.4
    calico     | v3.19.4
    coredns    | v1.8.4
    containerd | v1.6.10
    runc       | v1.1.4
    crictl     | v1.24.2
    cni        | v1.1.1
    cfssl      | v1.6.3

    1.3. Network Allocation

    Network         | CIDR
    Node network    | 192.168.11.0/24
    Service network | 10.96.0.0/12
    Pod network     | 172.16.0.0/12

    2. Cluster Deployment

    2.1. Host Preparation

    Every host must have a unique UUID and a unique MAC address. Pay special attention to VMs created by cloning: clones come up with identical UUIDs and MAC addresses.
    Set each host's IP address and hostname. Since my hosts were all cloned from the same VMware template, they start out with the same IP.

    sed -i 's/IPADDR=192.168.11.11/IPADDR=192.168.11.71/g' /etc/sysconfig/network-scripts/ifcfg-ens33
    hostnamectl set-hostname k8s-master1
    reboot
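    
    # Optional sanity check after the reboot: confirm the clone really has unique
    # identifiers (paths assume a typical CentOS 7 VM; adjust the NIC name if needed)
    cat /sys/class/dmi/id/product_uuid              # SMBIOS UUID, should differ per VM
    ip link show ens33 | awk '/ether/ {print $2}'   # MAC address
    cat /etc/machine-id                             # systemd machine ID
    # If /etc/machine-id is identical across clones, regenerate it:
    # rm -f /etc/machine-id && systemd-machine-id-setup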
    
    
    cat >> /etc/hosts << EOF
    192.168.11.71 k8s-master1
    EOF
    
    # Time synchronization
    yum install -y chrony
    systemctl start chronyd
    systemctl enable chronyd
    chronyc sources
    
    # Disable the firewall
    systemctl disable --now firewalld
    
    # Disable SELinux
    setenforce 0
    sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
    sestatus
    
    # Disable swap
    sed -ri 's/.*swap.*/#&/' /etc/fstab
    swapoff -a && sysctl -w vm.swappiness=0
    cat /etc/fstab
    
    # Network configuration
    # Option 1: disable NetworkManager entirely
    # systemctl disable --now NetworkManager
    # systemctl start network && systemctl enable network
    
    # Option 2 (used here): keep NetworkManager, but have it ignore Calico's interfaces
    cat > /etc/NetworkManager/conf.d/calico.conf << EOF
    [keyfile]
    unmanaged-devices=interface-name:cali*;interface-name:tunl*
    EOF
    systemctl restart NetworkManager
    
    # Host system tuning
    ulimit -SHn 65535
    cat >> /etc/security/limits.conf <<EOF
    * soft nofile 655360
    * hard nofile 655360
    * soft nproc 655350
    * hard nproc 655350
    * soft memlock unlimited
    * hard memlock unlimited
    EOF
    
    yum install ipvsadm ipset sysstat conntrack libseccomp -y
    
    tee /etc/modules-load.d/ipvs.conf << 'EOF'
    ip_vs
    ip_vs_lc
    ip_vs_wlc
    ip_vs_rr
    ip_vs_wrr
    ip_vs_lblc
    ip_vs_lblcr
    ip_vs_dh
    ip_vs_sh
    ip_vs_fo
    ip_vs_nq
    ip_vs_sed
    ip_vs_ftp
    nf_conntrack
    ip_tables
    ip_set
    xt_set
    ipt_set
    ipt_rpfilter
    ipt_REJECT
    ipip
    EOF
    
    systemctl restart systemd-modules-load.service
    
    # Check that the modules are loaded
    lsmod | grep -e ip_vs -e nf_conntrack
    
    # Tune kernel parameters
    tee /etc/sysctl.d/k8s.conf << 'EOF'
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-iptables = 1
    fs.may_detach_mounts = 1
    vm.overcommit_memory=1
    vm.panic_on_oom=0
    fs.inotify.max_user_watches=89100
    fs.file-max=52706963
    fs.nr_open=52706963
    net.netfilter.nf_conntrack_max=2310720
    
    net.ipv4.tcp_keepalive_time = 600
    net.ipv4.tcp_keepalive_probes = 3
    net.ipv4.tcp_keepalive_intvl = 15
    net.ipv4.tcp_max_tw_buckets = 36000
    net.ipv4.tcp_tw_reuse = 1
    net.ipv4.tcp_max_orphans = 327680
    net.ipv4.tcp_orphan_retries = 3
    net.ipv4.tcp_syncookies = 1
    net.ipv4.tcp_max_syn_backlog = 16384
    net.ipv4.tcp_timestamps = 0
    net.core.somaxconn = 16384
    
    net.ipv6.conf.all.disable_ipv6 = 0
    net.ipv6.conf.default.disable_ipv6 = 0
    net.ipv6.conf.lo.disable_ipv6 = 0
    net.ipv6.conf.all.forwarding = 1
    
    EOF
    
    sysctl --system
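    
    # Spot-check that the parameters took effect
    sysctl net.ipv4.ip_forward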
    
    # Load the kernel modules containerd needs
    modprobe overlay
    modprobe br_netfilter
    
    tee /etc/modules-load.d/containerd.conf << 'EOF'
    overlay
    br_netfilter
    EOF
    
    systemctl enable --now systemd-modules-load.service
    
    # Install required tools
    yum install -y wget jq psmisc vim net-tools tree telnet yum-utils device-mapper-persistent-data lvm2 git network-scripts tar curl
    
    reboot
    
    

    After the reboot, verify that the ipvs and containerd related modules are loaded:

    lsmod | grep --color=auto -e ip_vs -e nf_conntrack
    
    lsmod | egrep 'br_netfilter|overlay'
    
    

    2.2. Download the etcd, k8s, and cfssl Packages

    Available K8S releases can be found via this link: GitHub reference link

    # Create the working directory
    mkdir -p /data/work
    cd /data/work
    
    # Download the cfssl tools
    CFSSL_VERSION=1.6.3
    wget -O cfssl_linux-amd64 https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v${CFSSL_VERSION}/cfssl_${CFSSL_VERSION}_linux_amd64
    wget -O cfssljson_linux-amd64 https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v${CFSSL_VERSION}/cfssljson_${CFSSL_VERSION}_linux_amd64
    wget -O cfssl-certinfo_linux-amd64 https://ghproxy.com/https://github.com/cloudflare/cfssl/releases/download/v${CFSSL_VERSION}/cfssl-certinfo_${CFSSL_VERSION}_linux_amd64
    
    chmod +x cfssl*
    mv cfssl_linux-amd64 /usr/local/bin/cfssl
    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
    mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
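    
    # Optional sanity check: confirm the cfssl binary runs
    cfssl version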
    
    
    
    ETCD_VERSION=v3.5.4
    wget https://ghproxy.com/https://github.com/etcd-io/etcd/releases/download/${ETCD_VERSION}/etcd-${ETCD_VERSION}-linux-amd64.tar.gz
    tar -zxvf etcd-${ETCD_VERSION}-linux-amd64.tar.gz
    cp -ar etcd-${ETCD_VERSION}-linux-amd64/etcd* /usr/local/bin
    chmod +x /usr/local/bin/etcd*
    ls -l /usr/local/bin/etcd*
    etcdctl version
    
    
    CONTAINERD_VERSION=1.6.10
    wget https://ghproxy.com/https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/cri-containerd-cni-${CONTAINERD_VERSION}-linux-amd64.tar.gz
    tar -xvzf cri-containerd-cni-*-linux-amd64.tar.gz -C /
    
    RUNC_VERSION=1.1.4
    wget https://ghproxy.com/https://github.com/opencontainers/runc/releases/download/v${RUNC_VERSION}/runc.amd64
    mv runc.amd64 runc && chmod +x runc && mv -f runc /usr/local/sbin/
    
    
    K8S_VERSION=v1.25.4
    wget https://storage.googleapis.com/kubernetes-release/release/${K8S_VERSION}/kubernetes-server-linux-amd64.tar.gz
    tar zxvf kubernetes-server-linux-amd64.tar.gz
    cd  kubernetes/server/bin/
    
    cp kube-apiserver kube-controller-manager kube-scheduler kubectl kubelet kube-proxy /usr/local/bin/
    
    # Leave this directory and return to the /data/work working directory; all subsequent steps run from there
    cd /data/work/
    
    

    2.3. Generate the CA Certificate

    cat > ca-csr.json   << EOF
    {
      "CN": "kubernetes",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Beijing",
          "L": "Beijing",
          "O": "Kubernetes",
          "OU": "Kubernetes-manual"
        }
      ],
      "ca": {
        "expiry": "876000h"
      }
    }
    EOF
    
    cfssl gencert -initca ca-csr.json  | cfssljson -bare ca
    
    cat > ca-config.json << EOF
    {
      "signing": {
        "default": {
          "expiry": "876000h"
        },
        "profiles": {
          "kubernetes": {
            "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ],
            "expiry": "876000h"
          }
        }
      }
    }
    EOF
    
    mkdir -p /etc/kubernetes/pki
    cp ca*.pem /etc/kubernetes/pki/
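    
    # Optional sanity check: inspect the CA's subject and validity window
    openssl x509 -in ca.pem -noout -subject -dates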
    
    

    2.4. Deploy ETCD

    # Create the ETCD CA CSR file
    tee etcd-ca-csr.json << 'EOF'
    {
      "CN": "etcd",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Beijing",
          "L": "Beijing",
          "O": "etcd",
          "OU": "Etcd Security"
        }
      ],
      "ca": {
        "expiry": "876000h"
      }
    }
    EOF
    
    # Create the etcd CSR file
    tee etcd-csr.json << 'EOF'
    {
      "CN": "etcd",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Beijing",
          "L": "Beijing",
          "O": "etcd",
          "OU": "Etcd Security"
        }
      ]
    }
    EOF
    
    # Generate the ETCD CA certificate
    cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
    
    # Generate the etcd certificates
    cfssl gencert \
       -ca=etcd-ca.pem \
       -ca-key=etcd-ca-key.pem \
       -config=ca-config.json \
       -hostname=127.0.0.1,k8s-master1,192.168.11.71 \
       -profile=kubernetes \
       etcd-csr.json | cfssljson -bare etcd
    ls -l etcd*.pem
    
    # Create the ETCD configuration file
    tee etcd-config.yml << 'EOF'
    name: 'etcd1'
    data-dir: /var/lib/etcd
    wal-dir: /var/lib/etcd/wal
    snapshot-count: 5000
    heartbeat-interval: 100
    election-timeout: 1000
    quota-backend-bytes: 0
    listen-peer-urls: 'https://192.168.11.71:2380'
    listen-client-urls: 'https://192.168.11.71:2379,http://127.0.0.1:2379'
    max-snapshots: 3
    max-wals: 5
    cors:
    initial-advertise-peer-urls: 'https://192.168.11.71:2380'
    advertise-client-urls: 'https://192.168.11.71:2379'
    discovery:
    discovery-fallback: 'proxy'
    discovery-proxy:
    discovery-srv:
    initial-cluster: 'etcd1=https://192.168.11.71:2380'
    initial-cluster-token: 'etcd-k8s-cluster'
    initial-cluster-state: 'new'
    strict-reconfig-check: false
    enable-v2: true
    enable-pprof: true
    proxy: 'off'
    proxy-failure-wait: 5000
    proxy-refresh-interval: 30000
    proxy-dial-timeout: 1000
    proxy-write-timeout: 5000
    proxy-read-timeout: 0
    client-transport-security:
      cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
      key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
      client-cert-auth: true
      trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
      auto-tls: true
    peer-transport-security:
      cert-file: '/etc/kubernetes/pki/etcd/etcd.pem'
      key-file: '/etc/kubernetes/pki/etcd/etcd-key.pem'
      peer-client-cert-auth: true
      trusted-ca-file: '/etc/kubernetes/pki/etcd/etcd-ca.pem'
      auto-tls: true
    debug: false
    log-package-levels:
    log-outputs: [default]
    force-new-cluster: false
    EOF
    
    # Create the etcd systemd unit file
    tee etcd.service << 'EOF'
    [Unit]
    Description=Etcd Service
    Documentation=https://coreos.com/etcd/docs/latest/
    After=network.target
    
    [Service]
    Type=notify
    ExecStart=/usr/local/bin/etcd --config-file=/etc/etcd/etcd-config.yml
    Restart=on-failure
    RestartSec=10
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    Alias=etcd3.service
    EOF
    
    mkdir -p /etc/etcd/ssl
    cp etcd*.pem /etc/etcd/ssl/
    cp etcd-config.yml /etc/etcd/
    cp etcd.service /usr/lib/systemd/system/
    
    mkdir -p /etc/kubernetes/pki/etcd /var/lib/etcd
    ln -s /etc/etcd/ssl/* /etc/kubernetes/pki/etcd/
    
    # Start ETCD
    systemctl daemon-reload && systemctl enable etcd.service && systemctl start etcd.service
    
    
    # Check the health of the ETCD endpoint
    ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 endpoint health
    
    # List the cluster members
    ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 member list
    
    # Show the DB size and which node is the leader
    ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 endpoint status
    
    # Benchmark ETCD performance
    ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/etcd/etcd-ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 check perf
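    
    # To avoid repeating the TLS flags, etcdctl can also read them from the
    # environment (the ETCDCTL_* variables are supported by etcdctl v3), e.g.:
    export ETCDCTL_API=3
    export ETCDCTL_ENDPOINTS=https://192.168.11.71:2379
    export ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/etcd-ca.pem
    export ETCDCTL_CERT=/etc/kubernetes/pki/etcd/etcd.pem
    export ETCDCTL_KEY=/etc/kubernetes/pki/etcd/etcd-key.pem
    etcdctl --write-out=table endpoint status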
    
    

    Execution result:

    [root@k8s-master1 work]# # Check the health of the ETCD endpoint
    [root@k8s-master1 work]# ETCDCTL_API=3 && /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 endpoint health
    +----------------------------+--------+-------------+-------+
    |          ENDPOINT          | HEALTH |    TOOK     | ERROR |
    +----------------------------+--------+-------------+-------+
    | https://192.168.11.71:2379 |   true | 14.286573ms |       |
    +----------------------------+--------+-------------+-------+
    [root@k8s-master1 work]#
    [root@k8s-master1 work]# # List the cluster members
    [root@k8s-master1 work]# ETCDCTL_API=3 && /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 member list
    +------------------+---------+-------+----------------------------+----------------------------+------------+
    |        ID        | STATUS  | NAME  |         PEER ADDRS         |        CLIENT ADDRS        | IS LEARNER |
    +------------------+---------+-------+----------------------------+----------------------------+------------+
    | 5ce02800cf5f635d | started | etcd1 | https://192.168.11.71:2380 | https://192.168.11.71:2379 |      false |
    +------------------+---------+-------+----------------------------+----------------------------+------------+
    [root@k8s-master1 work]#
    [root@k8s-master1 work]# # Show the DB size and which node is the leader
    [root@k8s-master1 work]# ETCDCTL_API=3 && /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 endpoint status
    +----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
    |          ENDPOINT          |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
    +----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
    | https://192.168.11.71:2379 | 5ce02800cf5f635d |   3.5.4 |   20 kB |      true |      false |         2 |          5 |                  5 |        |
    +----------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
    [root@k8s-master1 work]#
    [root@k8s-master1 work]# # Benchmark ETCD performance
    [root@k8s-master1 work]# ETCDCTL_API=3 && /usr/local/bin/etcdctl --write-out=table --cacert=/etc/kubernetes/pki/ca.pem --cert=/etc/kubernetes/pki/etcd/etcd.pem --key=/etc/kubernetes/pki/etcd/etcd-key.pem --endpoints=https://192.168.11.71:2379 check perf
     59 / 60 Booooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooom  !  98.33%PASS: Throughput is 150 writes/s
    PASS: Slowest request took 0.073586s
    PASS: Stddev is 0.002288s
    PASS
    [root@k8s-master1 work]#
    
    

    2.5. Deploy the apiserver

    tee kube-apiserver-csr.json << 'EOF'
    {
      "CN": "kube-apiserver",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Beijing",
          "L": "Beijing",
          "O": "Kubernetes",
          "OU": "Kubernetes-manual"
        }
      ]
    }
    EOF
    
    # Generate the apiserver certificate
    cfssl gencert   \
      -ca=ca.pem   \
      -ca-key=ca-key.pem   \
      -config=ca-config.json   \
      -hostname=10.96.0.1,127.0.0.1,kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.default.svc.cluster.local,192.168.11.71  \
      -profile=kubernetes   kube-apiserver-csr.json | cfssljson -bare kube-apiserver
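    
    # Optional: confirm the SANs made it into the certificate
    openssl x509 -in kube-apiserver.pem -noout -text | grep -A1 'Subject Alternative Name'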
    
    
    
    cat > front-proxy-ca-csr.json  << EOF
    {
      "CN": "kubernetes",
      "key": {
         "algo": "rsa",
         "size": 2048
      },
      "ca": {
        "expiry": "876000h"
      }
    }
    EOF
    
    cat > front-proxy-client-csr.json  << EOF
    {
      "CN": "front-proxy-client",
      "key": {
         "algo": "rsa",
         "size": 2048
      }
    }
    EOF
    
    
    # Generate the apiserver aggregation-layer (front-proxy) certificates
    cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca
    
    cfssl gencert  \
      -ca=front-proxy-ca.pem   \
      -ca-key=front-proxy-ca-key.pem   \
      -config=ca-config.json   \
      -profile=kubernetes   front-proxy-client-csr.json | cfssljson -bare front-proxy-client
    
    # Create the ServiceAccount key pair (sa.key signs service-account tokens, sa.pub verifies them)
    openssl genrsa -out sa.key 2048
    openssl rsa -in sa.key -pubout -out sa.pub
    
    # Note: the heredoc delimiter must stay unquoted (EOF, not 'EOF'); otherwise the $(head -c 16 /dev/urandom | od -An -t x | tr -d ' ') below would not be expanded
    cat > token.csv << EOF
    $(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    EOF
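    
    # The static token file format is one line per token: token,user,uid,"group1,group2"
    cat token.csv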
    
    
    # Create the apiserver configuration file
    tee kube-apiserver.conf << 'EOF'
    KUBE_APISERVER_OPTS="--v=4  \
          --allow-privileged=true  \
          --bind-address=0.0.0.0  \
          --secure-port=6443  \
          --advertise-address=192.168.11.71 \
          --service-cluster-ip-range=10.96.0.0/12  \
          --service-node-port-range=30000-50000  \
          --etcd-servers=https://192.168.11.71:2379 \
          --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem  \
          --etcd-certfile=/etc/etcd/ssl/etcd.pem  \
          --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem  \
          --client-ca-file=/etc/kubernetes/pki/ca.pem  \
          --tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem  \
          --tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem  \
          --kubelet-client-certificate=/etc/kubernetes/pki/kube-apiserver.pem  \
          --kubelet-client-key=/etc/kubernetes/pki/kube-apiserver-key.pem  \
          --service-account-key-file=/etc/kubernetes/pki/sa.pub  \
          --service-account-signing-key-file=/etc/kubernetes/pki/sa.key  \
          --service-account-issuer=https://kubernetes.default.svc.cluster.local \
          --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname  \
          --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota  \
          --authorization-mode=Node,RBAC  \
          --enable-bootstrap-token-auth=true  \
          --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem  \
          --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem  \
          --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem  \
          --requestheader-allowed-names=aggregator  \
          --requestheader-group-headers=X-Remote-Group  \
          --requestheader-extra-headers-prefix=X-Remote-Extra-  \
          --requestheader-username-headers=X-Remote-User \
          --token-auth-file=/etc/kubernetes/token.csv \
          --enable-aggregator-routing=true"
    EOF
    
    # Create the apiserver systemd unit file
    tee kube-apiserver.service << 'EOF'
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes
    After=etcd.service
    Wants=etcd.service
    
    [Service]
    EnvironmentFile=-/etc/kubernetes/kube-apiserver.conf
    ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
    Restart=on-failure
    RestartSec=5
    Type=notify
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    # Copy the apiserver files into place
    cp kube-apiserver*.pem /etc/kubernetes/pki/
    cp front-proxy*.pem /etc/kubernetes/pki/
    cp sa.pub sa.key /etc/kubernetes/pki/
    cp kube-apiserver.conf token.csv /etc/kubernetes/
    cp kube-apiserver.service /usr/lib/systemd/system/
    # Required on every node (there is only one node here)
    mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
    
    # Start the service
    systemctl daemon-reload && systemctl enable kube-apiserver && systemctl start kube-apiserver
    
    
    systemctl status kube-apiserver
    
    curl --insecure https://192.168.11.71:6443/
    
    

    Execution result. The 401 from the anonymous curl is expected: the API server is up and serving TLS, it just rejects unauthenticated requests.

    [root@k8s-master1 ~]# systemctl status kube-apiserver
    ● kube-apiserver.service - Kubernetes API Server
       Loaded: loaded (/usr/lib/systemd/system/kube-apiserver.service; enabled;                                                                                vendor preset: disabled)
       Active: active (running) since Thu 2022-11-17 22:16:20 CST; 1min 25s ago
         Docs: https://github.com/kubernetes/kubernetes
     Main PID: 52956 (kube-apiserver)
       CGroup: /system.slice/kube-apiserver.service
               └─52956 /usr/local/bin/kube-apiserver --enable-admission-plugi...
    
    Nov 17 22:17:22 k8s-master1 kube-apiserver[52956]: I1117 22:17:22.478545 ...
    Nov 17 22:17:22 k8s-master1 kube-apiserver[52956]: I1117 22:17:22.484930 ...
    Nov 17 22:17:32 k8s-master1 kube-apiserver[52956]: I1117 22:17:32.464028 ...
    Nov 17 22:17:32 k8s-master1 kube-apiserver[52956]: I1117 22:17:32.469795 ...
    Nov 17 22:17:32 k8s-master1 kube-apiserver[52956]: I1117 22:17:32.481656 ...
    Nov 17 22:17:32 k8s-master1 kube-apiserver[52956]: I1117 22:17:32.485503 ...
    Nov 17 22:17:42 k8s-master1 kube-apiserver[52956]: I1117 22:17:42.463590 ...
    Nov 17 22:17:42 k8s-master1 kube-apiserver[52956]: I1117 22:17:42.467854 ...
    Nov 17 22:17:42 k8s-master1 kube-apiserver[52956]: I1117 22:17:42.480078 ...
    Nov 17 22:17:42 k8s-master1 kube-apiserver[52956]: I1117 22:17:42.486241 ...
    Hint: Some lines were ellipsized, use -l to show in full.
    [root@k8s-master1 ~]#
    [root@k8s-master1 ~]# curl --insecure https://192.168.11.71:6443/
    {
      "kind": "Status",
      "apiVersion": "v1",
      "metadata": {},
      "status": "Failure",
      "message": "Unauthorized",
      "reason": "Unauthorized",
      "code": 401
    [root@k8s-master1 ~]#
    

    2.6. Deploy the controller-manager

    tee kube-controller-manager-csr.json << 'EOF'
    {
      "CN": "system:kube-controller-manager",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Beijing",
          "L": "Beijing",
          "O": "system:kube-controller-manager",
          "OU": "Kubernetes-manual"
        }
      ]
    }
    EOF
    
    # Generate the controller-manager certificate
    cfssl gencert \
       -ca=ca.pem \
       -ca-key=ca-key.pem \
       -hostname=127.0.0.1,192.168.11.71 \
       -config=ca-config.json \
       -profile=kubernetes \
       kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
    
    
    # Create the kube-controller-manager kubeconfig
    kubectl config set-cluster kubernetes \
         --certificate-authority=ca.pem \
         --embed-certs=true \
         --server=https://192.168.11.71:6443 \
         --kubeconfig=kube-controller-manager.kubeconfig
    
    # Set the context
    kubectl config set-context system:kube-controller-manager@kubernetes \
        --cluster=kubernetes \
        --user=system:kube-controller-manager \
        --kubeconfig=kube-controller-manager.kubeconfig
    
    # Set the client credentials
    kubectl config set-credentials system:kube-controller-manager \
         --client-certificate=kube-controller-manager.pem \
         --client-key=kube-controller-manager-key.pem \
         --embed-certs=true \
         --kubeconfig=kube-controller-manager.kubeconfig
    
    # Switch to the new context
    kubectl config use-context system:kube-controller-manager@kubernetes \
         --kubeconfig=kube-controller-manager.kubeconfig
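    
    # Optional: confirm the kubeconfig points at the right cluster and user
    kubectl config view --kubeconfig=kube-controller-manager.kubeconfig --minify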
    
    # Create the controller-manager configuration file
    tee kube-controller-manager.conf << 'EOF'
    KUBE_CONTROLLER_MANAGER_OPTS="--v=4 \
          --bind-address=127.0.0.1 \
          --root-ca-file=/etc/kubernetes/pki/ca.pem \
          --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
          --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
          --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
          --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
          --leader-elect=true \
          --use-service-account-credentials=true \
          --node-monitor-grace-period=40s \
          --node-monitor-period=5s \
          --pod-eviction-timeout=2m0s \
          --controllers=*,bootstrapsigner,tokencleaner \
          --allocate-node-cidrs=true \
          --service-cluster-ip-range=10.96.0.0/12 \
          --cluster-cidr=172.16.0.0/12 \
          --node-cidr-mask-size-ipv4=24 \
          --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem"
    EOF
    
    # Create the systemd unit file
    tee kube-controller-manager.service << 'EOF'
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/kubernetes/kubernetes
    [Service]
    EnvironmentFile=-/etc/kubernetes/kube-controller-manager.conf
    ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
    Restart=on-failure
    RestartSec=5
    [Install]
    WantedBy=multi-user.target
    EOF
    
    
    # Copy the files into place
    cp kube-controller-manager*.pem /etc/kubernetes/pki/
    cp kube-controller-manager.kubeconfig /etc/kubernetes/
    cp kube-controller-manager.conf /etc/kubernetes/
    cp kube-controller-manager.service /usr/lib/systemd/system/
    
    
    systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl start kube-controller-manager && systemctl status kube-controller-manager
    
    
    

    2.7. Deploy the scheduler

    tee kube-scheduler-csr.json << 'EOF'
    {
      "CN": "system:kube-scheduler",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Beijing",
          "L": "Beijing",
          "O": "system:kube-scheduler",
          "OU": "Kubernetes-manual"
        }
      ]
    }
    EOF
    
    # Generate the certificate
    cfssl gencert \
       -ca=ca.pem \
       -ca-key=ca-key.pem \
       -config=ca-config.json \
       -profile=kubernetes \
       kube-scheduler-csr.json | cfssljson -bare kube-scheduler
    
    
    # Create the kube-scheduler kubeconfig
    kubectl config set-cluster kubernetes \
         --certificate-authority=ca.pem \
         --embed-certs=true \
         --server=https://192.168.11.71:6443 \
         --kubeconfig=kube-scheduler.kubeconfig
    
    # Set the client credentials
    kubectl config set-credentials system:kube-scheduler \
         --client-certificate=kube-scheduler.pem \
         --client-key=kube-scheduler-key.pem \
         --embed-certs=true \
         --kubeconfig=kube-scheduler.kubeconfig
    
    # Set the context
    kubectl config set-context system:kube-scheduler@kubernetes \
         --cluster=kubernetes \
         --user=system:kube-scheduler \
         --kubeconfig=kube-scheduler.kubeconfig
    
    # Switch to the new context
    kubectl config use-context system:kube-scheduler@kubernetes \
         --kubeconfig=kube-scheduler.kubeconfig
    
    
    # Create the kube-scheduler configuration file
    tee kube-scheduler.conf << 'EOF'
    KUBE_SCHEDULER_OPTS="--v=4 \
          --bind-address=127.0.0.1 \
          --leader-elect=true \
          --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig"
    EOF
    
    
    # Create the kube-scheduler systemd unit file
    tee kube-scheduler.service << 'EOF'
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=-/etc/kubernetes/kube-scheduler.conf
    ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
    Restart=on-failure
    RestartSec=5
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    
    # Copy the kube-scheduler files into place
    cp kube-scheduler*.pem /etc/kubernetes/pki/
    cp kube-scheduler.kubeconfig /etc/kubernetes/
    cp kube-scheduler.conf /etc/kubernetes/
    cp kube-scheduler.service /usr/lib/systemd/system/
    
    
    systemctl daemon-reload &&  systemctl enable kube-scheduler && systemctl start kube-scheduler && systemctl status kube-scheduler
    
    

    2.8. Deploy kubectl

    tee admin-csr.json << 'EOF'
    {
      "CN": "admin",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Beijing",
          "L": "Beijing",
          "O": "system:masters",
          "OU": "Kubernetes-manual"
        }
      ]
    }
    EOF
    
    # Generate the admin certificate
    cfssl gencert \
        -ca=ca.pem \
        -ca-key=ca-key.pem \
        -config=ca-config.json \
        -profile=kubernetes \
        admin-csr.json | cfssljson -bare admin
    
    
    kubectl config set-cluster kubernetes \
      --certificate-authority=ca.pem \
      --embed-certs=true \
      --server=https://192.168.11.71:6443 \
      --kubeconfig=admin.kubeconfig
    
    # Set the client credentials
    kubectl config set-credentials kubernetes-admin \
      --client-certificate=admin.pem \
      --client-key=admin-key.pem \
      --embed-certs=true \
      --kubeconfig=admin.kubeconfig
    
    # Set the context
    kubectl config set-context kubernetes-admin@kubernetes \
      --cluster=kubernetes \
      --user=kubernetes-admin \
      --kubeconfig=admin.kubeconfig
    
    # Switch to the new context
    kubectl config use-context kubernetes-admin@kubernetes \
      --kubeconfig=admin.kubeconfig
    
    
    
    mkdir -p /root/.kube; cp admin.kubeconfig /root/.kube/config
    cp admin*.pem /etc/kubernetes/pki
    
    yum install -y bash-completion
    source /usr/share/bash-completion/bash_completion
    source <(kubectl completion bash)
    kubectl completion bash > ~/.kube/completion.bash.inc
    source '/root/.kube/completion.bash.inc'
    source $HOME/.bash_profile
    
    
    # Check cluster status
    kubectl cluster-info
    kubectl get componentstatuses
    kubectl get all --all-namespaces
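    
    # componentstatuses is deprecated; the API server's health endpoints give a
    # more detailed per-check view:
    kubectl get --raw='/readyz?verbose'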
    
    

    2.9. Install containerd

    # Create the containerd systemd unit file
    tee /etc/systemd/system/containerd.service << 'EOF'
    [Unit]
    Description=containerd container runtime
    Documentation=https://containerd.io
    After=network.target local-fs.target
    
    [Service]
    ExecStartPre=-/sbin/modprobe overlay
    ExecStart=/usr/local/bin/containerd
    Type=notify
    Delegate=yes
    KillMode=process
    Restart=always
    RestartSec=5
    LimitNPROC=infinity
    LimitCORE=infinity
    LimitNOFILE=infinity
    TasksMax=infinity
    OOMScoreAdjust=-999
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    # Configure the kernel modules containerd needs
    cat <<EOF | tee /etc/modules-load.d/containerd.conf
    overlay
    br_netfilter
    EOF
    
    # Load the overlay and br_netfilter modules
    systemctl restart systemd-modules-load.service
    
    # Configure the kernel parameters containerd needs
    cat <<EOF | tee /etc/sysctl.d/99-kubernetes-cri.conf
    net.bridge.bridge-nf-call-iptables  = 1
    net.ipv4.ip_forward                 = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    EOF
    
    # Apply the kernel parameters
    sysctl --system
    
    # Create the configuration file
    mkdir -p /etc/containerd
    # Option 1: generate the default config and patch it
    #containerd config default | tee /etc/containerd/config.toml
    #sed -i "s#SystemdCgroup = false#SystemdCgroup = true#" /etc/containerd/config.toml
    #sed -i "s#registry.k8s.io#registry.aliyuncs.com/google_containers#" /etc/containerd/config.toml
    
    # Option 2: write the config directly. Mind the version: the file below targets containerd 1.6.10;
    # for other versions, generate the default config first and then apply the same changes.
    tee /etc/containerd/config.toml << 'EOF'
    disabled_plugins = []
    imports = []
    oom_score = 0
    plugin_dir = ""
    required_plugins = []
    root = "/var/lib/containerd"
    state = "/run/containerd"
    temp = ""
    version = 2
    
    [cgroup]
      path = ""
    
    [debug]
      address = ""
      format = ""
      gid = 0
      level = ""
      uid = 0
    
    [grpc]
      address = "/run/containerd/containerd.sock"
      gid = 0
      max_recv_message_size = 16777216
      max_send_message_size = 16777216
      tcp_address = ""
      tcp_tls_ca = ""
      tcp_tls_cert = ""
      tcp_tls_key = ""
      uid = 0
    
    [metrics]
      address = ""
      grpc_histogram = false
    
    [plugins]
    
      [plugins."io.containerd.gc.v1.scheduler"]
        deletion_threshold = 0
        mutation_threshold = 100
        pause_threshold = 0.02
        schedule_delay = "0s"
        startup_delay = "100ms"
    
      [plugins."io.containerd.grpc.v1.cri"]
        device_ownership_from_security_context = false
        disable_apparmor = false
        disable_cgroup = false
        disable_hugetlb_controller = true
        disable_proc_mount = false
        disable_tcp_service = true
        enable_selinux = false
        enable_tls_streaming = false
        enable_unprivileged_icmp = false
        enable_unprivileged_ports = false
        ignore_image_defined_volumes = false
        max_concurrent_downloads = 3
        max_container_log_line_size = 16384
        netns_mounts_under_state_dir = false
        restrict_oom_score_adj = false
        sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
        selinux_category_range = 1024
        stats_collect_period = 10
        stream_idle_timeout = "4h0m0s"
        stream_server_address = "127.0.0.1"
        stream_server_port = "0"
        systemd_cgroup = false
        tolerate_missing_hugetlb_controller = true
        unset_seccomp_profile = ""
    
        [plugins."io.containerd.grpc.v1.cri".cni]
          bin_dir = "/opt/cni/bin"
          conf_dir = "/etc/cni/net.d"
          conf_template = ""
          ip_pref = ""
          max_conf_num = 1
    
        [plugins."io.containerd.grpc.v1.cri".containerd]
          default_runtime_name = "runc"
          disable_snapshot_annotations = true
          discard_unpacked_layers = false
          ignore_rdt_not_enabled_errors = false
          no_pivot = false
          snapshotter = "overlayfs"
    
          [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
            base_runtime_spec = ""
            cni_conf_dir = ""
            cni_max_conf_num = 0
            container_annotations = []
            pod_annotations = []
            privileged_without_host_devices = false
            runtime_engine = ""
            runtime_path = ""
            runtime_root = ""
            runtime_type = ""
    
            [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
    
          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
    
            [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
              base_runtime_spec = ""
              cni_conf_dir = ""
              cni_max_conf_num = 0
              container_annotations = []
              pod_annotations = []
              privileged_without_host_devices = false
              runtime_engine = ""
              runtime_path = ""
              runtime_root = ""
              runtime_type = "io.containerd.runc.v2"
    
              [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
                BinaryName = ""
                CriuImagePath = ""
                CriuPath = ""
                CriuWorkPath = ""
                IoGid = 0
                IoUid = 0
                NoNewKeyring = false
                NoPivotRoot = false
                Root = ""
                ShimCgroup = ""
                SystemdCgroup = true
    
          [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
            base_runtime_spec = ""
            cni_conf_dir = ""
            cni_max_conf_num = 0
            container_annotations = []
            pod_annotations = []
            privileged_without_host_devices = false
            runtime_engine = ""
            runtime_path = ""
            runtime_root = ""
            runtime_type = ""
    
            [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
    
        [plugins."io.containerd.grpc.v1.cri".image_decryption]
          key_model = "node"
    
        [plugins."io.containerd.grpc.v1.cri".registry]
          config_path = ""
    
          [plugins."io.containerd.grpc.v1.cri".registry.auths]
    
          [plugins."io.containerd.grpc.v1.cri".registry.configs]
    
          [plugins."io.containerd.grpc.v1.cri".registry.headers]
    
          [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
            [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
              endpoint = ["https://docker.mirrors.ustc.edu.cn", "http://hub-mirror.c.163.com"]
            [plugins."io.containerd.grpc.v1.cri".registry.mirrors."gcr.io"]
              endpoint = ["https://gcr.mirrors.ustc.edu.cn"]
            [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
              endpoint = ["https://gcr.mirrors.ustc.edu.cn/google-containers/"]
            [plugins."io.containerd.grpc.v1.cri".registry.mirrors."quay.io"]
              endpoint = ["https://quay.mirrors.ustc.edu.cn"]
    
        [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
          tls_cert_file = ""
          tls_key_file = ""
    
      [plugins."io.containerd.internal.v1.opt"]
        path = "/opt/containerd"
    
      [plugins."io.containerd.internal.v1.restart"]
        interval = "10s"
    
      [plugins."io.containerd.internal.v1.tracing"]
        sampling_ratio = 1.0
        service_name = "containerd"
    
      [plugins."io.containerd.metadata.v1.bolt"]
        content_sharing_policy = "shared"
    
      [plugins."io.containerd.monitor.v1.cgroups"]
        no_prometheus = false
    
      [plugins."io.containerd.runtime.v1.linux"]
        no_shim = false
        runtime = "runc"
        runtime_root = ""
        shim = "containerd-shim"
        shim_debug = false
    
      [plugins."io.containerd.runtime.v2.task"]
        platforms = ["linux/amd64"]
        sched_core = false
    
      [plugins."io.containerd.service.v1.diff-service"]
        default = ["walking"]
    
      [plugins."io.containerd.service.v1.tasks-service"]
        rdt_config_file = ""
    
      [plugins."io.containerd.snapshotter.v1.aufs"]
        root_path = ""
    
      [plugins."io.containerd.snapshotter.v1.btrfs"]
        root_path = ""
    
      [plugins."io.containerd.snapshotter.v1.devmapper"]
        async_remove = false
        base_image_size = ""
        discard_blocks = false
        fs_options = ""
        fs_type = ""
        pool_name = ""
        root_path = ""
    
      [plugins."io.containerd.snapshotter.v1.native"]
        root_path = ""
    
      [plugins."io.containerd.snapshotter.v1.overlayfs"]
        root_path = ""
        upperdir_label = false
    
      [plugins."io.containerd.snapshotter.v1.zfs"]
        root_path = ""
    
      [plugins."io.containerd.tracing.processor.v1.otlp"]
        endpoint = ""
        insecure = false
        protocol = ""
    
    [proxy_plugins]
    
    [stream_processors]
    
      [stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
        accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
        args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
        env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
        path = "ctd-decoder"
        returns = "application/vnd.oci.image.layer.v1.tar"
    
      [stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
        accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
        args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
        env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
        path = "ctd-decoder"
        returns = "application/vnd.oci.image.layer.v1.tar+gzip"
    
    [timeouts]
      "io.containerd.timeout.bolt.open" = "0s"
      "io.containerd.timeout.shim.cleanup" = "5s"
      "io.containerd.timeout.shim.load" = "5s"
      "io.containerd.timeout.shim.shutdown" = "3s"
      "io.containerd.timeout.task.state" = "2s"
    
    [ttrpc]
      address = ""
      gid = 0
      uid = 0
    EOF
    
    
    
    systemctl daemon-reload
    systemctl enable --now containerd
    systemctl restart  containerd
    
    # Verify that containerd is working
    crictl info
    
    # Verify that images can be pulled
    ctr images pull docker.io/library/redis:alpine
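    
    # If crictl complains about the runtime endpoint, point it at containerd's
    # socket explicitly via a minimal /etc/crictl.yaml (values match the config above):
    tee /etc/crictl.yaml << 'EOF'
    runtime-endpoint: unix:///run/containerd/containerd.sock
    image-endpoint: unix:///run/containerd/containerd.sock
    timeout: 10
    debug: false
    EOF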
    
    

    2.10. Deploy the kubelet

    cat > bootstrap.secret.yaml << EOF
    apiVersion: v1
    kind: Secret
    metadata:
      name: bootstrap-token-c8ad9c
      namespace: kube-system
    type: bootstrap.kubernetes.io/token
    stringData:
      description: "The default bootstrap token generated by 'kubelet '."
      token-id: c8ad9c
      token-secret: 2e4d610cf3e7426e
      usage-bootstrap-authentication: "true"
      usage-bootstrap-signing: "true"
      auth-extra-groups:  system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
     
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: kubelet-bootstrap
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:node-bootstrapper
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: Group
      name: system:bootstrappers:default-node-token
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: node-autoapprove-bootstrap
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: Group
      name: system:bootstrappers:default-node-token
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: node-autoapprove-certificate-rotation
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: Group
      name: system:nodes
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      annotations:
        rbac.authorization.kubernetes.io/autoupdate: "true"
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
      name: system:kube-apiserver-to-kubelet
    rules:
      - apiGroups:
          - ""
        resources:
          - nodes/proxy
          - nodes/stats
          - nodes/log
          - nodes/spec
          - nodes/metrics
        verbs:
          - "*"
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: system:kube-apiserver
      namespace: ""
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:kube-apiserver-to-kubelet
    subjects:
      - apiGroup: rbac.authorization.k8s.io
        kind: User
        name: kube-apiserver
    EOF
    
    kubectl config set-cluster kubernetes     \
      --certificate-authority=ca.pem     \
      --embed-certs=true  \
      --server=https://192.168.11.71:6443     \
      --kubeconfig=bootstrap-kubelet.kubeconfig
    
    ## The token is defined in bootstrap.secret.yaml; if you change it there, change it here too (format: <token-id>.<token-secret>)
    kubectl config set-credentials tls-bootstrap-token-user  \
      --token=c8ad9c.2e4d610cf3e7426e \
      --kubeconfig=bootstrap-kubelet.kubeconfig
    
    kubectl config set-context tls-bootstrap-token-user@kubernetes     \
      --cluster=kubernetes     \
      --user=tls-bootstrap-token-user     \
      --kubeconfig=bootstrap-kubelet.kubeconfig
    
    kubectl config use-context tls-bootstrap-token-user@kubernetes     \
      --kubeconfig=bootstrap-kubelet.kubeconfig
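    
    # To use a fresh bootstrap token instead of the hard-coded c8ad9c.2e4d610cf3e7426e:
    # a token-id is 6 chars of [a-z0-9] and a token-secret is 16 chars, e.g.:
    # TOKEN_ID=$(openssl rand -hex 3); TOKEN_SECRET=$(openssl rand -hex 8)
    # Remember to update both bootstrap.secret.yaml and the --token flag above to match.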
    
    
    cat > kubelet-conf.yml <<EOF
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    address: 0.0.0.0
    port: 10250
    readOnlyPort: 10255
    authentication:
      anonymous:
        enabled: false
      webhook:
        cacheTTL: 2m0s
        enabled: true
      x509:
        clientCAFile: /etc/kubernetes/pki/ca.pem
    authorization:
      mode: Webhook
      webhook:
        cacheAuthorizedTTL: 5m0s
        cacheUnauthorizedTTL: 30s
    cgroupDriver: systemd
    cgroupsPerQOS: true
    clusterDNS:
    - 10.96.0.10
    clusterDomain: cluster.local
    containerLogMaxFiles: 5
    containerLogMaxSize: 10Mi
    contentType: application/vnd.kubernetes.protobuf
    cpuCFSQuota: true
    cpuManagerPolicy: none
    cpuManagerReconcilePeriod: 10s
    enableControllerAttachDetach: true
    enableDebuggingHandlers: true
    enforceNodeAllocatable:
    - pods
    eventBurst: 10
    eventRecordQPS: 5
    evictionHard:
      imagefs.available: 15%
      memory.available: 100Mi
      nodefs.available: 10%
      nodefs.inodesFree: 5%
    evictionPressureTransitionPeriod: 5m0s
    failSwapOn: true
    fileCheckFrequency: 20s
    hairpinMode: promiscuous-bridge
    healthzBindAddress: 127.0.0.1
    healthzPort: 10248
    httpCheckFrequency: 20s
    imageGCHighThresholdPercent: 85
    imageGCLowThresholdPercent: 80
    imageMinimumGCAge: 2m0s
    iptablesDropBit: 15
    iptablesMasqueradeBit: 14
    kubeAPIBurst: 10
    kubeAPIQPS: 5
    makeIPTablesUtilChains: true
    maxOpenFiles: 1000000
    maxPods: 110
    nodeStatusUpdateFrequency: 10s
    oomScoreAdj: -999
    podPidsLimit: -1
    registryBurst: 10
    registryPullQPS: 5
    resolvConf: /etc/resolv.conf
    rotateCertificates: true
    runtimeRequestTimeout: 2m0s
    serializeImagePulls: true
    staticPodPath: /etc/kubernetes/manifests
    streamingConnectionIdleTimeout: 4h0m0s
    syncFrequency: 1m0s
    volumeStatsAggPeriod: 1m0s
    EOF
    
    # Create the kubelet systemd unit file
    tee kubelet.service << 'EOF'
    [Unit]
    Description=Kubernetes Kubelet
    Documentation=https://github.com/kubernetes/kubernetes
    After=containerd.service
    Requires=containerd.service
    
    [Service]
    WorkingDirectory=/var/lib/kubelet
    ExecStart=/usr/local/bin/kubelet \
      --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \
      --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
      --config=/etc/kubernetes/kubelet-conf.yml \
      --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
      --node-labels=node.kubernetes.io/node= \
      --v=8
    Restart=on-failure
    RestartSec=5
    
    [Install]
    WantedBy=multi-user.target
    EOF
    
    
    mkdir -p /etc/kubernetes/manifests/ /etc/systemd/system/kubelet.service.d /var/lib/kubelet /var/log/kubernetes
    cp kubelet-conf.yml /etc/kubernetes/
    cp kubelet.service /usr/lib/systemd/system/
    cp bootstrap-kubelet.kubeconfig /etc/kubernetes/
    
    
    kubectl create -f bootstrap.secret.yaml
    
    
    systemctl daemon-reload && systemctl enable kubelet &&  systemctl start kubelet && systemctl status kubelet
    
    echo "sleep 20s" && sleep 20
    kubectl get nodes -owide
    kubectl get csr
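    
    # If the CSR shows "Pending" instead of "Approved,Issued", the bootstrap
    # auto-approval did not trigger; as a fallback it can be approved by hand
    # (substitute the real name reported by `kubectl get csr`):
    # kubectl certificate approve <csr-name>
    
    # The healthz endpoint configured in kubelet-conf.yml should also answer:
    curl -s http://127.0.0.1:10248/healthz && echo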
    
    

    Expected result. Note that the kubelet takes some time to start; wait a minute or two before re-running the command. If the output still does not match, the configuration most likely has a problem somewhere.

    [root@k8s-master1 work]#
    [root@k8s-master1 work]# kubectl get nodes -owide
    NAME          STATUS   ROLES    AGE   VERSION    INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION              CONTAINER-RUNTIME
    k8s-master1   Ready    <none>   53s   v1.21.14   192.168.11.71   <none>        CentOS Linux 7 (Core)   6.0.7-1.el7.elrepo.x86_64   containerd://1.6.10
    [root@k8s-master1 work]# kubectl get csr
    NAME        AGE     SIGNERNAME                                    REQUESTOR           CONDITION
    csr-z56vc   2m12s   kubernetes.io/kube-apiserver-client-kubelet   kubelet-bootstrap   Approved,Issued
    [root@k8s-master1 work]#
    [root@k8s-master1 work]#
    
    

    2.11. Deploy kube-proxy

    tee kube-proxy-csr.json << 'EOF'
    {
      "CN": "system:kube-proxy",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "Beijing",
          "L": "Beijing",
          "O": "system:kube-proxy",
          "OU": "Kubernetes-manual"
        }
      ]
    }
    EOF
    
    
    cfssl gencert \
       -ca=ca.pem \
       -ca-key=ca-key.pem \
       -config=ca-config.json \
       -profile=kubernetes \
       kube-proxy-csr.json | cfssljson -bare kube-proxy
    
    
    kubectl config set-cluster kubernetes     \
      --certificate-authority=ca.pem     \
      --embed-certs=true     \
      --server=https://192.168.11.71:6443     \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config set-credentials kube-proxy  \
      --client-certificate=kube-proxy.pem     \
      --client-key=kube-proxy-key.pem     \
      --embed-certs=true     \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config set-context kube-proxy@kubernetes  \
      --cluster=kubernetes     \
      --user=kube-proxy     \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config use-context kube-proxy@kubernetes \
      --kubeconfig=kube-proxy.kubeconfig
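    
    # Optional: confirm the active context (should print kube-proxy@kubernetes).
    kubectl config current-context --kubeconfig=kube-proxy.kubeconfig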
    
    
    tee kube-proxy.yaml << 'EOF'
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    bindAddress: 0.0.0.0
    clientConnection:
      acceptContentTypes: ""
      burst: 10
      contentType: application/vnd.kubernetes.protobuf
      kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
      qps: 5
    clusterCIDR: 172.16.0.0/12
    configSyncPeriod: 15m0s
    conntrack:
      maxPerCore: 32768
      min: 131072
      tcpCloseWaitTimeout: 1h0m0s
      tcpEstablishedTimeout: 24h0m0s
    enableProfiling: false
    healthzBindAddress: 0.0.0.0:10256
    hostnameOverride: ""
    iptables:
      masqueradeAll: false
      masqueradeBit: 14
      minSyncPeriod: 0s
      syncPeriod: 30s
    ipvs:
      masqueradeAll: true
      minSyncPeriod: 5s
      scheduler: "rr"
      syncPeriod: 30s
    kind: KubeProxyConfiguration
    metricsBindAddress: 127.0.0.1:10249
    mode: "ipvs"
    nodePortAddresses: null
    oomScoreAdj: -999
    portRange: ""
    udpIdleTimeout: 250ms
    EOF
    
    
    tee kube-proxy.service << 'EOF'
    [Unit]
    Description=Kubernetes Kube-Proxy Server
    Documentation=https://github.com/kubernetes/kubernetes
    After=network.target
    
    [Service]
    WorkingDirectory=/var/lib/kube-proxy
    ExecStart=/usr/local/bin/kube-proxy \
      --config=/etc/kubernetes/kube-proxy.yaml \
      --v=8
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    
    EOF
    
    mkdir -p /var/lib/kube-proxy
    cp kube-proxy*.pem /etc/kubernetes/pki/
    cp kube-proxy.kubeconfig kube-proxy.yaml /etc/kubernetes/
    cp kube-proxy.service /usr/lib/systemd/system/
    
    
    systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy && systemctl status kube-proxy
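    
    # Optional verification, assuming kube-proxy came up cleanly: the metrics
    # endpoint reports the active proxy mode, and ipvsadm should list a virtual
    # server for the kubernetes service (10.96.0.1:443).
    curl -s 127.0.0.1:10249/proxyMode && echo
    ipvsadm -Ln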
    
    

    2.12. Deploy the Calico network plugin

    Some readers have reported that on CentOS 7, libseccomp must be upgraded first, otherwise Calico fails to install.

    curl https://raw.githubusercontent.com/projectcalico/calico/v3.24.1/manifests/calico.yaml -O
    
    sed -i 's/# - name: CALICO_IPV4POOL_CIDR/- name: CALICO_IPV4POOL_CIDR/g' calico.yaml
    sed -i 's/#   value: "192.168.0.0\/16"/  value: "172.16.0.0\/12"/g' calico.yaml
    sed -i 's/"type": "calico-ipam"/"type": "calico-ipam",\n              "assign_ipv4": "true"/g' calico.yaml
    
    # Deploy the Calico components
    kubectl apply -f calico.yaml
    
    # Check whether the Calico pods were created successfully
    kubectl get pods -n kube-system -owide
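    
    # Optionally block until the calico-node pods report Ready (the timeout
    # is an arbitrary choice; image pulls can take a while):
    kubectl wait --for=condition=Ready pod -l k8s-app=calico-node -n kube-system --timeout=300s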
    
    

    Sample output:

    [root@k8s-master1 work]#
    [root@k8s-master1 work]#
    [root@k8s-master1 work]# kubectl apply -f calico.yaml
    configmap/calico-config created
    customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
    customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
    clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
    clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
    clusterrole.rbac.authorization.k8s.io/calico-node created
    clusterrolebinding.rbac.authorization.k8s.io/calico-node created
    daemonset.apps/calico-node created
    serviceaccount/calico-node created
    deployment.apps/calico-kube-controllers created
    serviceaccount/calico-kube-controllers created
    Warning: policy/v1beta1 PodDisruptionBudget is deprecated in v1.21+, unavailable in v1.25+; use policy/v1 PodDisruptionBudget
    poddisruptionbudget.policy/calico-kube-controllers created
    [root@k8s-master1 work]#
    [root@k8s-master1 work]#
    [root@k8s-master1 work]#
    [root@k8s-master1 work]#
    [root@k8s-master1 work]#
    [root@k8s-master1 work]#
    [root@k8s-master1 work]# kubectl get pods -n kube-system -owide
    NAME                                       READY   STATUS    RESTARTS   AGE     IP              NODE        NOMINATED NODE   READINESS GATES
    calico-kube-controllers-7cc8dd57d9-dpcqw   1/1     Running   0          3m16s   10.88.0.2       k8s-node1   <none>           <none>
    calico-node-hr9f5                          1/1     Running   0          3m15s   192.168.11.74   k8s-node1   <none>           <none>
    [root@k8s-master1 work]#
    

    2.13. Deploy CoreDNS

    tee coredns.yaml  << 'EOF'
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: coredns
      namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
      name: system:coredns
    rules:
      - apiGroups:
        - ""
        resources:
        - endpoints
        - services
        - pods
        - namespaces
        verbs:
        - list
        - watch
      - apiGroups:
        - discovery.k8s.io
        resources:
        - endpointslices
        verbs:
        - list
        - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      annotations:
        rbac.authorization.kubernetes.io/autoupdate: "true"
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
      name: system:coredns
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:coredns
    subjects:
    - kind: ServiceAccount
      name: coredns
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: coredns
      namespace: kube-system
    data:
      Corefile: |
        .:53 {
            errors
            health {
              lameduck 5s
            }
            ready
            kubernetes cluster.local in-addr.arpa ip6.arpa {
              fallthrough in-addr.arpa ip6.arpa
            }
            prometheus :9153
            forward . /etc/resolv.conf {
              max_concurrent 1000
            }
            cache 30
            loop
            reload
            loadbalance
        }
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: coredns
      namespace: kube-system
      labels:
        k8s-app: kube-dns
        kubernetes.io/name: "CoreDNS"
    spec:
      # replicas: not specified here:
      # 1. Default is 1.
      # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
      strategy:
        type: RollingUpdate
        rollingUpdate:
          maxUnavailable: 1
      selector:
        matchLabels:
          k8s-app: kube-dns
      template:
        metadata:
          labels:
            k8s-app: kube-dns
        spec:
          priorityClassName: system-cluster-critical
          serviceAccountName: coredns
          tolerations:
            - key: "CriticalAddonsOnly"
              operator: "Exists"
          nodeSelector:
            kubernetes.io/os: linux
          affinity:
             podAntiAffinity:
               preferredDuringSchedulingIgnoredDuringExecution:
               - weight: 100
                 podAffinityTerm:
                   labelSelector:
                     matchExpressions:
                       - key: k8s-app
                         operator: In
                         values: ["kube-dns"]
                   topologyKey: kubernetes.io/hostname
          containers:
          - name: coredns
            image: registry.cn-beijing.aliyuncs.com/dotbalo/coredns:1.8.6 
            imagePullPolicy: IfNotPresent
            resources:
              limits:
                memory: 170Mi
              requests:
                cpu: 100m
                memory: 70Mi
            args: [ "-conf", "/etc/coredns/Corefile" ]
            volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
              readOnly: true
            ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9153
              name: metrics
              protocol: TCP
            securityContext:
              allowPrivilegeEscalation: false
              capabilities:
                add:
                - NET_BIND_SERVICE
                drop:
                - all
              readOnlyRootFilesystem: true
            livenessProbe:
              httpGet:
                path: /health
                port: 8080
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
            readinessProbe:
              httpGet:
                path: /ready
                port: 8181
                scheme: HTTP
          dnsPolicy: Default
          volumes:
            - name: config-volume
              configMap:
                name: coredns
                items:
                - key: Corefile
                  path: Corefile
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: kube-dns
      namespace: kube-system
      annotations:
        prometheus.io/port: "9153"
        prometheus.io/scrape: "true"
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
        kubernetes.io/name: "CoreDNS"
    spec:
      selector:
        k8s-app: kube-dns
      clusterIP: 10.96.0.10 
      ports:
      - name: dns
        port: 53
        protocol: UDP
      - name: dns-tcp
        port: 53
        protocol: TCP
      - name: metrics
        port: 9153
        protocol: TCP
    EOF
    
    kubectl apply -f coredns.yaml
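    
    # Quick verification (assumes the rollout completes within the timeout):
    kubectl rollout status deployment/coredns -n kube-system --timeout=120s
    kubectl get pods -n kube-system -l k8s-app=kube-dns -owide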
    
    

    2.14. Install Metrics Server

    tee metrics-server.yaml << 'EOF'
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      labels:
        k8s-app: metrics-server
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      labels:
        k8s-app: metrics-server
        rbac.authorization.k8s.io/aggregate-to-admin: "true"
        rbac.authorization.k8s.io/aggregate-to-edit: "true"
        rbac.authorization.k8s.io/aggregate-to-view: "true"
      name: system:aggregated-metrics-reader
    rules:
    - apiGroups:
      - metrics.k8s.io
      resources:
      - pods
      - nodes
      verbs:
      - get
      - list
      - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      labels:
        k8s-app: metrics-server
      name: system:metrics-server
    rules:
    - apiGroups:
      - ""
      resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
      - configmaps
      verbs:
      - get
      - list
      - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      labels:
        k8s-app: metrics-server
      name: metrics-server-auth-reader
      namespace: kube-system
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: extension-apiserver-authentication-reader
    subjects:
    - kind: ServiceAccount
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      labels:
        k8s-app: metrics-server
      name: metrics-server:system:auth-delegator
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:auth-delegator
    subjects:
    - kind: ServiceAccount
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      labels:
        k8s-app: metrics-server
      name: system:metrics-server
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:metrics-server
    subjects:
    - kind: ServiceAccount
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        k8s-app: metrics-server
      name: metrics-server
      namespace: kube-system
    spec:
      ports:
      - name: https
        port: 443
        protocol: TCP
        targetPort: https
      selector:
        k8s-app: metrics-server
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      labels:
        k8s-app: metrics-server
      name: metrics-server
      namespace: kube-system
    spec:
      selector:
        matchLabels:
          k8s-app: metrics-server
      strategy:
        rollingUpdate:
          maxUnavailable: 0
      template:
        metadata:
          labels:
            k8s-app: metrics-server
        spec:
          containers:
          - args:
            - --cert-dir=/tmp
            - --secure-port=4443
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
            - --kubelet-use-node-status-port
            - --metric-resolution=15s
            - --kubelet-insecure-tls
            - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem # change to front-proxy-ca.crt for kubeadm
            - --requestheader-username-headers=X-Remote-User
            - --requestheader-group-headers=X-Remote-Group
            - --requestheader-extra-headers-prefix=X-Remote-Extra-
            image: registry.cn-beijing.aliyuncs.com/dotbalo/metrics-server:0.5.0
            imagePullPolicy: IfNotPresent
            livenessProbe:
              failureThreshold: 3
              httpGet:
                path: /livez
                port: https
                scheme: HTTPS
              periodSeconds: 10
            name: metrics-server
            ports:
            - containerPort: 4443
              name: https
              protocol: TCP
            readinessProbe:
              failureThreshold: 3
              httpGet:
                path: /readyz
                port: https
                scheme: HTTPS
              initialDelaySeconds: 20
              periodSeconds: 10
            resources:
              requests:
                cpu: 100m
                memory: 200Mi
            securityContext:
              readOnlyRootFilesystem: true
              runAsNonRoot: true
              runAsUser: 1000
            volumeMounts:
            - mountPath: /tmp
              name: tmp-dir
            - name: ca-ssl
              mountPath: /etc/kubernetes/pki
          nodeSelector:
            kubernetes.io/os: linux
          priorityClassName: system-cluster-critical
          serviceAccountName: metrics-server
          volumes:
          - emptyDir: {}
            name: tmp-dir
          - name: ca-ssl
            hostPath:
              path: /etc/kubernetes/pki
    
    ---
    apiVersion: apiregistration.k8s.io/v1
    kind: APIService
    metadata:
      labels:
        k8s-app: metrics-server
      name: v1beta1.metrics.k8s.io
    spec:
      group: metrics.k8s.io
      groupPriorityMinimum: 100
      insecureSkipTLSVerify: true
      service:
        name: metrics-server
        namespace: kube-system
      version: v1beta1
      versionPriority: 100
    EOF
    
    kubectl  apply -f metrics-server.yaml
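    
    # Verification: the first scrape can take a minute or two, so retry if
    # these commands initially return "metrics not available" errors.
    kubectl rollout status deployment/metrics-server -n kube-system --timeout=120s
    kubectl top nodes
    kubectl top pods -n kube-system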
    
    

    2.15. Cluster verification

    tee deploy.yaml << 'EOF'
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-deployment
    spec:
      selector:
        matchLabels:
          app: nginx
      replicas: 3
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: nginx:alpine
            ports:
            - containerPort: 80
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service
    spec:
      selector:
        app: nginx
      ports:
      - protocol: TCP
        port: 80
        targetPort: 80
        nodePort: 30080
      type: NodePort
    EOF
    
    kubectl apply -f deploy.yaml
    
    # Check the results
    kubectl get pod
    kubectl get svc
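    
    # Once the pods are Running, the NodePort should answer on the node IP
    # (30080 is the nodePort set in the Service above):
    curl -sI http://192.168.11.71:30080 | head -n1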
    
    

    2.16. Use a pod to resolve the kubernetes service in the default namespace
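
    The tests below run nslookup inside a busybox pod, which this guide has not created yet. A minimal sketch to start one (the pod name and image tag are assumptions; busybox:1.28 is often used because nslookup is broken in some newer busybox images):

    kubectl run busybox --image=docker.io/library/busybox:1.28 --restart=Never -- sleep 3600
    kubectl wait --for=condition=Ready pod/busybox --timeout=120s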

    kubectl get svc
    #NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
    #kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   17h
    
    kubectl exec  busybox -n default -- nslookup kubernetes
    #Server:    10.96.0.10
    #Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
    
    #Name:      kubernetes
    #Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
    

    2.16.1. Test cross-namespace DNS resolution

    kubectl exec  busybox -n default -- nslookup kube-dns.kube-system
    #Server:    10.96.0.10
    #Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
    
    #Name:      kube-dns.kube-system
    #Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
    

    2.16.2. Every node must be able to reach the kubernetes service on port 443 and the kube-dns service on port 53

    telnet 10.96.0.1 443
    Trying 10.96.0.1...
    Connected to 10.96.0.1.
    Escape character is '^]'.
    
     telnet 10.96.0.10 53
    Trying 10.96.0.10...
    Connected to 10.96.0.10.
    Escape character is '^]'.
    
    curl 10.96.0.10:53
    curl: (52) Empty reply from server
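    
    # If telnet is not installed, curl's telnet:// scheme performs the same
    # TCP reachability check (press Ctrl+C once it reports "Connected"):
    curl -v telnet://10.96.0.1:443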
    

    2.16.3. Pods must be able to communicate with each other

    kubectl get po -owide
    NAME      READY   STATUS    RESTARTS   AGE   IP              NODE         NOMINATED NODE   READINESS GATES
    busybox   1/1     Running   0          17m   172.27.14.193   k8s-node02   <none>           <none>
    
     kubectl get po -n kube-system -owide
    NAME                                       READY   STATUS    RESTARTS      AGE   IP               NODE           NOMINATED NODE   READINESS GATES
    calico-kube-controllers-5dffd5886b-4blh6   1/1     Running   0             77m   172.25.244.193   k8s-master01   <none>           <none>
    calico-node-fvbdq                          1/1     Running   1 (75m ago)   77m   192.168.1.61     k8s-master01   <none>           <none>
    calico-node-g8nqd                          1/1     Running   0             77m   192.168.1.64     k8s-node01     <none>           <none>
    calico-node-mdps8                          1/1     Running   0             77m   192.168.1.65     k8s-node02     <none>           <none>
    calico-node-nf4nt                          1/1     Running   0             77m   192.168.1.63     k8s-master03   <none>           <none>
    calico-node-sq2ml                          1/1     Running   0             77m   192.168.1.62     k8s-master02   <none>           <none>
    calico-typha-8445487f56-mg6p8              1/1     Running   0             77m   192.168.1.65     k8s-node02     <none>           <none>
    calico-typha-8445487f56-pxbpj              1/1     Running   0             77m   192.168.1.61     k8s-master01   <none>           <none>
    calico-typha-8445487f56-tnssl              1/1     Running   0             77m   192.168.1.64     k8s-node01     <none>           <none>
    coredns-5db5696c7-67h79                    1/1     Running   0             63m   172.25.92.65     k8s-master02   <none>           <none>
    metrics-server-6bf7dcd649-5fhrw            1/1     Running   0             61m   172.18.195.1     k8s-master03   <none>           <none>
    
    # Exec into busybox and ping a pod on another node
    
    kubectl exec -ti busybox -- sh
    / # ping 192.168.1.64
    PING 192.168.1.64 (192.168.1.64): 56 data bytes
    64 bytes from 192.168.1.64: seq=0 ttl=63 time=0.358 ms
    64 bytes from 192.168.1.64: seq=1 ttl=63 time=0.668 ms
    64 bytes from 192.168.1.64: seq=2 ttl=63 time=0.637 ms
    64 bytes from 192.168.1.64: seq=3 ttl=63 time=0.624 ms
    64 bytes from 192.168.1.64: seq=4 ttl=63 time=0.907 ms
    
    # Successful pings show that pods can communicate across namespaces and across hosts
    

    2.16.4. Create three replicas and verify they are spread across different nodes

    cat > deployments.yaml << EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-deployment
      labels:
        app: nginx
    spec:
      replicas: 3
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: docker.io/library/nginx:1.14.2
            ports:
            - containerPort: 80
    
    EOF
    
    kubectl  apply -f deployments.yaml 
    deployment.apps/nginx-deployment created
    
    kubectl  get pod 
    NAME                               READY   STATUS    RESTARTS   AGE
    busybox                            1/1     Running   0          6m25s
    nginx-deployment-9456bbbf9-4bmvk   1/1     Running   0          8s
    nginx-deployment-9456bbbf9-9rcdk   1/1     Running   0          8s
    nginx-deployment-9456bbbf9-dqv8s   1/1     Running   0          8s
    
    # Clean up the nginx deployment
    
    kubectl delete -f deployments.yaml
    
  • Original article: https://blog.csdn.net/IOT_AI/article/details/128191974