• 【掌握K8S集群部署】手把手真正实现Kubernetes集群的配置与部署(附问题解决方法)


    1、环境准备

    IPHOSTNAME
    10.10.20.15k8s1
    10.10.20.16k8s2
    10.10.20.17k8s3

    注意hostname不要使用下划线、小数点与大写字母。

    2、环境配置(所有节点)

    # Stop and disable firewalld so kube-proxy/CNI traffic is not blocked
    systemctl stop firewalld
    systemctl disable firewalld
    
    # Disable SELinux (kubelet does not fully support enforcing mode)
    sed -i 's/enforcing/disabled/' /etc/selinux/config
    setenforce 0
    
    # Disable swap (kubelet refuses to start by default while swap is on)
    swapoff -a
    sed -ri 's/.*swap.*/#&/' /etc/fstab 
    
    # Enable route forwarding; load br_netfilter automatically on boot
    cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
    br_netfilter
    EOF
    echo 'net.ipv4.ip_forward = 1' >> /etc/sysctl.d/k8s.conf
    
    # Bridge netfilter + IPv6 sysctl settings required by Kubernetes networking
    echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.d/k8s.conf
    echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.d/k8s.conf
    echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.d/k8s.conf
    echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.d/k8s.conf
    echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.d/k8s.conf
    echo "net.ipv6.conf.all.forwarding = 1"  >> /etc/sysctl.d/k8s.conf
    # br_netfilter must be loaded BEFORE the bridge-nf sysctls can apply
    modprobe br_netfilter
    sysctl --system
    
    # Load the IPVS kernel modules used by kube-proxy's IPVS mode
    # NOTE(review): on kernels >= 4.19 the module is nf_conntrack, not
    # nf_conntrack_ipv4 — confirm against the target kernel version
    cat <<EOF | sudo tee /etc/sysconfig/modules/ipvs.modules
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    chmod 755 /etc/sysconfig/modules/ipvs.modules 
    sh /etc/sysconfig/modules/ipvs.modules
    
    # Install the IPVS management tools
    yum install -y ipset ipvsadm
    
    
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31
    • 32
    • 33
    • 34
    • 35
    • 36
    • 37
    • 38
    • 39
    • 40
    • 41
    • 42
    • 43
    • 44

    3、安装Docker(所有节点)

    # Remove any docker packages shipped with the distribution
    yum -y remove docker*
    # Install yum helper utilities (provides yum-config-manager)
    yum -y install yum-utils
    # Add the Aliyun mirror of the docker-ce yum repository
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    
    # List the installable versions
    yum list docker-ce --showduplicates | sort -r
    
    # Install 20.10.2 here; the version must be compatible with Kubernetes
    yum install -y docker-ce-20.10.2  docker-ce-cli-20.10.2 containerd.io
    
    # Start the service and enable it at boot
    systemctl start docker
    systemctl enable docker
    
    # Configure registry mirrors and set the cgroup driver to systemd
    # (must match kubelet's cgroup driver, which defaults to systemd)
    vi  /etc/docker/daemon.json
    {
      "registry-mirrors": [
        "https://registry.docker-cn.com",
        "http://hub-mirror.c.163.com",
        "https://docker.mirrors.ustc.edu.cn"
      ],
      "exec-opts": ["native.cgroupdriver=systemd"]
    }
    
    # Restart the service to pick up daemon.json
    systemctl daemon-reload
    systemctl restart docker
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25
    • 26
    • 27
    • 28
    • 29
    • 30
    • 31

    4、安装K8S服务(所有节点)

    # Uninstall any old Kubernetes packages
    yum remove -y kubelet kubeadm kubectl
    
    # Configure the Kubernetes yum repository (Aliyun mirror)
    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
           http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    
    # List the installable versions
    yum list kubelet --showduplicates | sort -r
    
    # Install kubelet/kubeadm/kubectl 1.23.0, compatible with docker 20.10.2
    yum install -y kubelet-1.23.0 kubeadm-1.23.0 kubectl-1.23.0
    
    # Enable kubelet at boot and start it
    systemctl enable kubelet
    systemctl start kubelet
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24

    5、配置Master主节点(主节点)

    1. 查看需要的镜像
    [root@k8s1 ~]# kubeadm config images list
    k8s.gcr.io/kube-apiserver:v1.23.14
    k8s.gcr.io/kube-controller-manager:v1.23.14
    k8s.gcr.io/kube-scheduler:v1.23.14
    k8s.gcr.io/kube-proxy:v1.23.14
    k8s.gcr.io/pause:3.6
    k8s.gcr.io/etcd:3.5.1-0
    k8s.gcr.io/coredns/coredns:v1.8.6
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    2. 下载镜像
    # Create the pull script
    vi images.sh
    # Script contents: pull each control-plane image from the Aliyun mirror
    images=(
     kube-apiserver:v1.23.14
     kube-controller-manager:v1.23.14
     kube-scheduler:v1.23.14
     kube-proxy:v1.23.14
     pause:3.6
     etcd:3.5.1-0
     coredns:v1.8.6
     )
    # Quote the expansions so each image name stays a single word and is
    # safe against word-splitting/globbing (ShellCheck SC2068/SC2086)
    for imageName in "${images[@]}"
    do
        docker pull "registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}"
    done
    
    # Run the script
    chmod +x images.sh && ./images.sh
    
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20

    如果需修改镜像标签, 命令(默认情况下不需改,下面初始化命令会指定对应空间名称):

    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
    
    • 1
    • 2

    3) 主节点执行初始化命令

    # Initialize the control plane on the master node:
    # advertise on this host's IP, pull images from the Aliyun mirror,
    # and set non-overlapping Service/Pod CIDRs
    kubeadm init \
    --apiserver-advertise-address=10.10.20.15 \
    --image-repository registry.aliyuncs.com/google_containers \
    --kubernetes-version v1.23.0 \
    --service-cidr=10.20.0.0/16 \
    --pod-network-cidr=10.30.0.0/16
    
    # --ignore-preflight-errors=Swap suppresses the swap preflight error
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9

    注意: 如果出现问题, 执行kubeadm reset进行还原。

    如果出现初始化报错:

    [kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp 127.0.0.1:10248: connect: connection refused.
    
    • 1

    执行重置命令, 再修改docker的cgroupdriver配置:

    vi /etc/docker/daemon.json 
    
    {
      "registry-mirrors": [
        "https://registry.docker-cn.com",
        "http://hub-mirror.c.163.com",
        "https://docker.mirrors.ustc.edu.cn"
      ],
      "exec-opts": ["native.cgroupdriver=systemd"]
    
    }
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11

    重启docker:

    systemctl daemon-reload
    systemctl restart docker
    
    • 1
    • 2

    初始化,执行成功后, 会出现如下提示:

    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    Alternatively, if you are the root user, you can run:
    
      export KUBECONFIG=/etc/kubernetes/admin.conf
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 10.10.20.15:16688 --token 6vmxut.lb3hvlxhux5suugx \
            --discovery-token-ca-cert-hash sha256:0f2b3e95ecee06bc40eca641548c3ca8afb86ebc2279f3fe2a75960330b0dbd1 
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20

    4)主节点执行初始化配置

    # Install the kubeconfig so kubectl works for a regular user
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    # Alternatively (root only): point kubectl straight at the admin config
    export KUBECONFIG=/etc/kubernetes/admin.conf
    
    
    # Run on each worker node to join the cluster
    # (token and CA hash come from the kubeadm init output above)
    kubeadm join 10.10.20.15:16688 --token 6vmxut.lb3hvlxhux5suugx \
            --discovery-token-ca-cert-hash sha256:0f2b3e95ecee06bc40eca641548c3ca8afb86ebc2279f3fe2a75960330b0dbd1 
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10

    5) 安装helm插件

    # Download the helm v3.6.0 binary and place it on PATH
    wget https://get.helm.sh/helm-v3.6.0-linux-amd64.tar.gz
    tar -zxvf helm-v3.6.0-linux-amd64.tar.gz
    mv linux-amd64/helm  /usr/local/bin/
    
    • 1
    • 2
    • 3

    执行helm list检查能否正确识别。

    6) 安装calico网络插件

    下载tigera-operator插件, 地址: https://github.com/projectcalico/calico/releases

    wget https://github.com/projectcalico/calico/releases/download/v3.23.3/tigera-operator-v3.23.3.tgz
    
    • 1

    安装calico:

    helm install calico tigera-operator-v3.23.3.tgz
    
    • 1

    检查确认相关的pod处于Running状态:

    watch kubectl get pods -n calico-system
    
    • 1

    输出结果:

    Every 2.0s: kubectl get pods -n calico-system                                                                                                                                       
    NAME                                       READY   STATUS    RESTARTS   AGE
    calico-kube-controllers-7bbdbd789c-jcvhz   1/1     Running   0          62m
    calico-node-z6t9g                          1/1     Running   0          62m
    calico-typha-9d5f49b9f-7xvw5               1/1     Running   0          62m
    
    • 1
    • 2
    • 3
    • 4
    • 5

    calico的api资源不建议采用kubectl来管理, 安装calicoctl插件来管理:

    # Install calicoctl as a kubectl plugin (invoked as `kubectl calico`)
    # NOTE(review): the kubectl-calico binary must end up on PATH for kubectl
    # plugin discovery to find it — confirm the working directory is on PATH
    wget https://github.com/projectcalico/calico/releases/download/v3.23.3/calicoctl-linux-amd64
    mv calicoctl-linux-amd64 kubectl-calico
    chmod +x kubectl-calico
    
    • 1
    • 2
    • 3

    验证插件是否正常:

    kubectl calico -h
    
    • 1

    7) 验证k8s的dns

    运行curl 容器

    kubectl run curl --image=radial/busyboxplus:curl -it
    
    • 1

    查看curl状态, kubectl describe pod curl 如果出现错误不能调度:

    Events:
      Type     Reason            Age                 From               Message
      ----     ------            ----                ----               -------
      Warning  FailedScheduling  38m                 default-scheduler  0/1 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.
      Warning  FailedScheduling  23m (x14 over 37m)  default-scheduler  0/1 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.
    
    • 1
    • 2
    • 3
    • 4
    • 5

    将master改为可调度:

     kubectl taint nodes --all node-role.kubernetes.io/master-
    
    • 1

    进入curl容器:

    kubectl exec -it curl /bin/sh
    
    • 1

    进行解析,输出以下结果,确认正常:

    [ root@curl:/ ]$ nslookup kubernetes.default
    Server:    10.20.0.10
    Address 1: 10.20.0.10 kube-dns.kube-system.svc.cluster.local
    
    Name:      kubernetes.default
    Address 1: 10.20.0.1 kubernetes.default.svc.cluster.local
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6

    6、部署一个Nginx应用服务

    kubectl create deployment my-nginx --image=nginx
    
    • 1

    查看IP地址:

    [root@k8s1 bin]# kubectl get pod -owide
    NAME                               READY   STATUS    RESTARTS       AGE     IP              NODE   NOMINATED NODE   READINESS GATES
    curl                               1/1     Running   0              121m    10.30.166.198   k8s1              
    my-nginx-c54945c55-lhmlt           1/1     Running   0              8m27s   10.30.219.1     k8s3              
    tigera-operator-56d4765449-tw7bv   1/1     Running   1 (136m ago)   140m    10.10.20.15     k8s1              
    
    • 1
    • 2
    • 3
    • 4
    • 5

    进行访问:

    [root@k8s1 bin]# curl 10.30.219.1
    <!DOCTYPE html>
    <html>
    <head>
    <title>Welcome to nginx!</title>
    <style>
    html { color-scheme: light dark; }
    body { width: 35em; margin: 0 auto;
    font-family: Tahoma, Verdana, Arial, sans-serif; }
    </style>
    </head>
    <body>
    <h1>Welcome to nginx!</h1>
    <p>If you see this page, the nginx web server is successfully installed and
    working. Further configuration is required.</p>
    
    <p>For online documentation and support please refer to
    <a href="http://nginx.org/">nginx.org</a>.<br/>
    Commercial support is available at
    <a href="http://nginx.com/">nginx.com</a>.</p>
    
    <p><em>Thank you for using nginx.</em></p>
    </body>
    </html>
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24

    应用服务部署成功, 可以正常访问。

    选择对应的calico版本, 确保能够支持所安装的kubernetes版本

    查看地址: https://projectcalico.docs.tigera.io/archive/v3.23/getting-started/kubernetes/requirements

    在这里插入图片描述

    执行命令:

    kubectl apply -f https://projectcalico.docs.tigera.io/archive/v3.23/manifests/tigera-operator.yaml
    kubectl apply -f https://projectcalico.docs.tigera.io/archive/v3.23/manifests/custom-resources.yaml
    
    • 1
    • 2

    7、部署Dashboard管理后台

    下载配置文件:

    wget  https://raw.githubusercontent.com/kubernetes/dashboard/v2.5.0/aio/deploy/recommended.yaml
    
    • 1

    对外暴露, 修改recommended.yaml文件:

    增加type: NodePort

    ...
    spec:
      ports:
        - port: 443
          targetPort: 8443
      type: NodePort
    ...
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7

    执行: kubectl get svc -A 查看dashboard的对外访问端口:

    在这里插入图片描述

    chrome浏览器进行访问:

    在这里插入图片描述

    如果不能访问, 点击页面任意空白位置,盲敲:thisisunsafe 确认即可访问。

    在这里插入图片描述

    配置dashboard-config.yaml:

    # Ingress exposing the Kubernetes Dashboard through nginx-ingress.
    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: kubernetes-dashboard
      # FIX: the dashboard recommended.yaml deploys the kubernetes-dashboard
      # Service into the "kubernetes-dashboard" namespace, and an Ingress can
      # only reference backend Services in its own namespace — so the Ingress
      # must live there too (it was previously in kube-system, where the
      # backend lookup would fail).
      namespace: kubernetes-dashboard
      annotations:
        nginx.ingress.kubernetes.io/ssl-redirect: "false"
        # Dashboard serves TLS itself, so proxy to the backend over HTTPS
        nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    spec:
      ingressClassName: nginx
      tls:
      - hosts:
        - k8s.example.com
        secretName: example-com-tls-secret
      rules:
      - host: k8s.example.com
        http:
          paths:
          - path: /
            pathType: Prefix
            backend:
              service:
               name: kubernetes-dashboard
               port:
                 number: 443
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22
    • 23
    • 24
    • 25

    创建管理员:

    # Create a ServiceAccount to use for Dashboard login
    kubectl create serviceaccount kube-dashboard-admin-sa -n kube-system
    
    # Grant it cluster-admin so the Dashboard can read/manage all resources
    kubectl create clusterrolebinding kube-dashboard-admin-sa \
    --clusterrole=cluster-admin --serviceaccount=kube-system:kube-dashboard-admin-sa
    
    • 1
    • 2
    • 3
    • 4

    获取登录token:

    [root@k8s1 ~]# kubectl -n kube-system get secret | grep kube-dashboard-admin-sa-token
    kube-dashboard-admin-sa-token-jtfxk              kubernetes.io/service-account-token   3      78s
    
    • 1
    • 2

    执行describe指令获取

    [root@k8s1 ~]# kubectl describe -n kube-system secret/kube-dashboard-admin-sa-token-jtfxk
    Name:         kube-dashboard-admin-sa-token-jtfxk
    Namespace:    kube-system
    Labels:       
    Annotations:  kubernetes.io/service-account.name: kube-dashboard-admin-sa
                  kubernetes.io/service-account.uid: 85dabe0c-dd93-457e-b077-6c732e72a009
    
    Type:  kubernetes.io/service-account-token
    
    Data
    ====
    ca.crt:     1099 bytes
    namespace:  11 bytes
    token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IlBJV1FfTDN2ZWNIUTBMWVU2NGJ0WnRTVzF6QVNjZXlNWDNuY1o4S3B0V2MifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlLWRhc2hib2FyZC1hZG1pbi1zYS10b2tlbi1qdGZ4ayIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlLWRhc2hib2FyZC1hZG1pbi1zYSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6Ijg1ZGFiZTBjLWRkOTMtNDU3ZS1iMDc3LTZjNzMyZTcyYTAwOSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlLWRhc2hib2FyZC1hZG1pbi1zYSJ9.YPke1H3fdH_Vmw2980e-Kn2yRWklcvOt3o9ryfedmD5SLR_lkkUZb996SwZPb0mxReZi7Gjws5JdDYKnskIgvTTp8encsQ2UpLiC0myyzPUg6KP_3IHiTJ52n40mFNaZ7BzdyyYizatDWB89LruE2QrhEXdgOFxe-Z1GvzMdUpeAzrhV_a_bfE5iCkWmiw1jmaVba3X_MLiDoVPdUsQRovk6oZCNAzs9ElS0Hvb-vt4Ye6zI68Z0q3An36QFRk1CIE2RZfysq92QRSKgvRf8SgKN1UqyGFr9ICQcZTeiL0wNFGk04t6Z83RTK5n0BzojTrZwT-r0OGJb5coBJIkLPA
    
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15

    通过token进行登录, 能够正确显示集群信息:

    在这里插入图片描述
    为避免所涉及的资源访问获取不了,提供了部分配置与安装包, 资源地址: https://download.csdn.net/download/hxx688/87189540

  • 相关阅读:
    游戏复用列表实现思路
    2 C++中的引用
    Android 启动service(Kotlin)
    某计费管理系统任意文件读取漏洞
    实现自定义SpringBoot的Starter
    初识Spring Data JPA
    MedNeRF:用于从单个X射线重建3D感知CT投影的医学神经辐射场
    凌恩客户文献|宏基因组binning解析中国浓味白酒窖泥微生物群落MAGs和代谢潜力
    算法与设计分析--实验一
    Springboot列车调度信息系统的设计与实现4guf9计算机毕业设计-课程设计-期末作业-毕设程序代做
  • 原文地址:https://blog.csdn.net/hxx688/article/details/128070841