• k8s single-master cluster setup for a test environment


    1. Prepare the lab environment

    192.168.1.11 master 2 CPUs / 4 GB RAM

    192.168.1.12 node1  2 CPUs / 4 GB RAM

    2. Initialize the lab environment

    [root@master ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo     
    [root@master ~]# curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    [root@master ~]# cat > /etc/yum.repos.d/kubernetes.repo <<EOF
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    EOF
    [root@master ~]# yum makecache fast
    
    [root@master ~]# systemctl stop firewalld  && systemctl  disable  firewalld
    [root@master ~]# yum -y install ntpdate
    [root@master ~]# ntpdate cn.pool.ntp.org
    26 Sep 22:56:20 ntpdate[1075]: adjust time server 139.199.214.202 offset -0.161591 sec
    
    [root@master ~]# crontab -e
    0 */1 * * * /usr/sbin/ntpdate cn.pool.ntp.org
    [root@master ~]# service crond restart
    [root@master ~]# swapoff  -a
    [root@master ~]# sed -i '/swap/s/^/#/' /etc/fstab
    [root@master ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
    [root@master ~]# reboot -f
    

    Install the base packages (run on every node)

    [root@master ~]# yum -y install wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel  vim ncurses-devel autoconf automake zlib-devel  python-devel epel-release openssh-server socat  ipvsadm conntrack ntpdate
    

    Adjust kernel parameters (run on every node)

    [root@master ~]# cat > /etc/sysctl.d/k8s.conf <<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    EOF
    [root@master ~]# modprobe br_netfilter
    
    [root@master ~]# sysctl --system
    ...
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
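
    Note that modprobe br_netfilter does not persist across reboots. A small sketch to load the module at boot (assuming systemd's modules-load.d mechanism, standard on CentOS 7):

    # load br_netfilter automatically at boot via systemd-modules-load
    cat > /etc/modules-load.d/br_netfilter.conf <<EOF
    br_netfilter
    EOF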
    

    Configure the hosts file (run on every node)

    192.168.1.11  master
    192.168.1.12  node1
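
    For example, the entries can be appended on each node like this (a sketch; skip it if /etc/hosts already contains them):

    cat >> /etc/hosts <<EOF
    192.168.1.11  master
    192.168.1.12  node1
    EOF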
    

    Configure passwordless SSH login from master to node1

    [root@master ~]# ssh-keygen -t rsa
    [root@master ~]# cd /root && ssh-copy-id -i .ssh/id_rsa.pub root@node1
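
    A quick check that the passwordless login works:

    [root@master ~]# ssh node1 hostname   # should print node1 without prompting for a password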
    

    Install docker-ce 19.03 (run on every node). The daemon.json written below sets Docker's cgroup driver to systemd, which kubeadm recommends; adjust it to your environment.

    mkdir /etc/docker
    cat > /etc/docker/daemon.json <<EOF
    {
      "exec-opts": ["native.cgroupdriver=systemd"]
    }
    EOF
    yum -y install docker-ce-19.03.7-3.el7
    systemctl enable docker && systemctl start docker && systemctl status docker
    

    Route bridged packets through iptables and make the kernel settings persistent

    echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
    echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables
    cat > /etc/sysctl.conf <<EOF
    vm.swappiness = 0
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    net.bridge.bridge-nf-call-ip6tables = 1
    EOF
    sysctl -p
    

    Enable IPVS. Without it kube-proxy falls back to iptables mode, which is less efficient, so the official docs recommend loading the IPVS kernel modules.

    cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack_ipv4"
    for kernel_module in \${ipvs_modules}; do
        /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
        if [ \$? -eq 0 ]; then
            /sbin/modprobe \${kernel_module}
        fi
    done
    EOF
    
    chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
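
    Loading the modules does not by itself switch kube-proxy into IPVS mode. After the cluster is initialized, one way to enable it is to edit the kubeadm-generated ConfigMap (a sketch; verify the object names in your cluster):

    kubectl edit configmap kube-proxy -n kube-system          # set mode: "ipvs"
    kubectl delete pod -n kube-system -l k8s-app=kube-proxy   # recreate kube-proxy pods to pick up the change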
    

    Install kubernetes 1.18.2 (kubeadm and kubelet, on every node)

    yum -y install kubeadm-1.18.2  kubelet-1.18.2
    systemctl enable kubelet
    
    [root@master ~]# ls *.gz
    1-18-cordns.tar.gz  1-18-kube-apiserver.tar.gz           1-18-kube-proxy.tar.gz      1-18-pause.tar.gz
    1-18-etcd.tar.gz    1-18-kube-controller-manager.tar.gz  1-18-kube-scheduler.tar.gz
    calico-node.tar.gz    cni.tar.gz
    [root@master ~]# for i  in `ls *.gz`;do docker load -i $i ;done
    [root@master ~]# docker images
    REPOSITORY                           TAG            IMAGE ID       CREATED       SIZE
    k8s.gcr.io/kube-proxy                v1.18.2        0d40868643c6   2 years ago   117MB
    k8s.gcr.io/kube-controller-manager   v1.18.2        ace0a8c17ba9   2 years ago   162MB
    k8s.gcr.io/kube-apiserver            v1.18.2        6ed75ad404bd   2 years ago   173MB
    k8s.gcr.io/kube-scheduler            v1.18.2        a3099161e137   2 years ago   95.3MB
    k8s.gcr.io/pause                     3.2            80d28bedfe5d   2 years ago   683kB
    k8s.gcr.io/coredns                   1.6.7          67da37a9a360   2 years ago   43.8MB
    k8s.gcr.io/etcd                      3.4.3-0        303ce5db0e90   2 years ago   288MB
    [root@node1 ~]# docker images
    REPOSITORY              TAG       IMAGE ID       CREATED       SIZE
    k8s.gcr.io/kube-proxy   v1.18.2   0d40868643c6   2 years ago   117MB
    k8s.gcr.io/pause        3.2       80d28bedfe5d   2 years ago   683kB
    

    Initialize the k8s cluster

    kubeadm init --kubernetes-version=v1.18.2 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=192.168.1.11
    

    Note: if the images have not been uploaded to each node, initialize the cluster this way instead, pulling from the Aliyun registry:

    kubeadm init --image-repository=registry.aliyuncs.com/google_containers --kubernetes-version=v1.18.2 --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address 192.168.1.11
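
    The required images can also be pre-pulled from the Aliyun repository before running init, which makes pull failures easier to spot (a sketch using kubeadm's built-in puller):

    kubeadm config images pull --image-repository=registry.aliyuncs.com/google_containers --kubernetes-version=v1.18.2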
    

    When the init command succeeds, it prints the following, which means initialization worked:

    Your Kubernetes control-plane has initialized successfully!
    
    To start using your cluster, you need to run the following as a regular user:
    
      mkdir -p $HOME/.kube
      sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
      sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    You should now deploy a pod network to the cluster.
    Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
      https://kubernetes.io/docs/concepts/cluster-administration/addons/
    
    Then you can join any number of worker nodes by running the following on each as root:
    
    kubeadm join 192.168.1.11:6443 --token 4laf0k.rsh962h3mxvgv96d \
        --discovery-token-ca-cert-hash sha256:814273ecb7350f04ee62fdd44559200e4e013edd1e529d35eae60b4b5aff5ba8
    
    [root@master ~]# mkdir -p $HOME/.kube
    [root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    [root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
    [root@master ~]# kubectl get nodes
    NAME     STATUS     ROLES    AGE   VERSION
    master   NotReady   master   44s   v1.18.2
    kubectl get pods -n kube-system   # the coredns pods are also Pending until a network plugin is installed
    

    Manually upload the two image tarballs above (cni.tar.gz and calico-node.tar.gz) to every node and load them with docker load -i

    docker load -i  cni.tar.gz
    docker load -i  calico-node.tar.gz
    

    Run the following on the master node:

    [root@master ~]# vim calico.yaml
    # line 167: point the IP_AUTODETECTION_METHOD env var at the master
    167               value: "can-reach=192.168.1.11"
    kubectl apply -f calico.yaml
    kubectl get nodes
    NAME     STATUS   ROLES    AGE   VERSION
    master   Ready    master   46m   v1.18.2
    kubectl get pods -n kube-system
    NAME                            READY   STATUS    RESTARTS   AGE
    coredns-66bff467f8-dqdd6         1/1     Running   0          47m
    coredns-66bff467f8-qr5zg         1/1     Running   0          47m
    

    Join node1 to the k8s cluster (run on node1)

    A permanent (non-expiring) token is used here

    [root@master ~]# kubeadm token list
    [root@master ~]# kubeadm token create --ttl=0
    W0928 23:07:18.994752   14297 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
    srd9aw.kbvkg6aqui9x9alu
    [root@master ~]# kubeadm token list
    TOKEN                     TTL         EXPIRES   USAGES                   DESCRIPTION   EXTRA GROUPS
    srd9aw.kbvkg6aqui9x9alu   <forever>   <never>   authentication,signing   <none>        system:bootstrappers:kubeadm:default-node-token
    [root@master ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der |openssl dgst -sha256 -hex
    writing RSA key
    (stdin)= 814273ecb7350f04ee62fdd44559200e4e013edd1e529d35eae60b4b5aff5ba8
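
    Alternatively, kubeadm can create the token and print the complete join command (token plus CA cert hash) in one step:

    [root@master ~]# kubeadm token create --ttl=0 --print-join-command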
    
    [root@node1 ~]# kubeadm join 192.168.1.11:6443 --token srd9aw.kbvkg6aqui9x9alu \
        --discovery-token-ca-cert-hash sha256:814273ecb7350f04ee62fdd44559200e4e013edd1e529d35eae60b4b5aff5ba8
    

    Check the cluster node status on the master node

    [root@master ~]# kubectl get node
    NAME     STATUS   ROLES    AGE     VERSION
    master   Ready    master   20m     v1.18.2
    node1    Ready    <none>   2m24s   v1.18.2
    

    Tab completion

    [root@master ~]# kubectl completion bash >/etc/bash_completion.d/kubectl
    [root@master ~]# kubeadm completion bash >/etc/bash_completion.d/kubeadm
    [root@master ~]# exit       # log out and back in for completion to take effect
    [root@master ~]# kubectl		Tab  Tab
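
    If completion still does not work after logging back in, the bash-completion package (which sources the files under /etc/bash_completion.d) may be missing:

    yum -y install bash-completion
    source /etc/bash_completion.d/kubectl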
    

    Install traefik (required on every node)

    docker load -i traefik_1_7_9.tar.gz   # traefik uses the image k8s.gcr.io/traefik:1.7.9
    

    1) Generate the traefik certificate (on the master)

    [root@master ~]# mkdir ikube/tls -p
    [root@master ~]# echo """
    [req]
    distinguished_name = req_distinguished_name
    prompt = yes
    
    [ req_distinguished_name ]
    countryName = Country Name (2 letter code)
    countryName_value=CN
    
    stateOrProvinceName = State or Province Name (full name)
    stateOrProvinceName_value    = Beijing
    
    localityName= Locality Name (eg, city)
    localityName_value = Haidian
    
    organizationName  = Organization Name (eg, company)
    organizationName_value = Channelsoft
    
    organizationalUnitName = Organizational Unit Name (eg, section)
    organizationalUnitName_value  = R & D Department
    
    commonName = Common Name (eg, your name or your server\'s hostname)
    commonName_value = *.multi.io
    
    emailAddress = Email Address
    emailAddress_value = lentil1016@gmail.com
    """ > ikube/tls/openssl.cnf
    [root@master ~]# openssl req -newkey rsa:4096 -nodes -config ~/ikube/tls/openssl.cnf -days 3650 -x509 -out ~/ikube/tls/tls.crt -keyout ~/ikube/tls/tls.key
    [root@master ~]# kubectl create -n kube-system secret tls ssl --cert ~/ikube/tls/tls.crt --key ~/ikube/tls/tls.key
    secret/ssl created
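
    A quick sanity check of the generated certificate before traefik uses it:

    [root@master ~]# openssl x509 -in ~/ikube/tls/tls.crt -noout -subject -dates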
    

    2) Apply the yaml file to create traefik

    [root@master ~]# kubectl apply -f traefik.yaml
    clusterrole.rbac.authorization.k8s.io/traefik-ingress-controller created
    clusterrolebinding.rbac.authorization.k8s.io/traefik-ingress-controller created
    serviceaccount/traefik-ingress-controller created
    configmap/traefik-conf created
    daemonset.apps/traefik-ingress-controller created
    service/traefik-ingress-service created
    service/traefik-web-ui created
    ingress.extensions/traefik-web-ui created
    

    3) Check whether traefik deployed successfully:

    [root@master ~]#  kubectl get pod -n kube-system -owide |grep traefik
    traefik-ingress-controller-2mxsm   1/1     Running   0          61s   192.168.1.12   node1    <none>   <none>
    traefik-ingress-controller-tgbk7   1/1     Running   0          61s   192.168.1.11   master   <none>   <none>
    

    Install kubernetes-dashboard v2 (the Kubernetes web UI)

    docker load -i dashboard_2_0_0.tar.gz
    docker load -i metrics-scrapter-1-0-1.tar.gz
    

    Run on the master node

    [root@master ~]# kubectl apply -f kubernetes-dashboard.yaml
    namespace/kubernetes-dashboard created
    serviceaccount/kubernetes-dashboard created
    service/kubernetes-dashboard created
    secret/kubernetes-dashboard-certs created
    secret/kubernetes-dashboard-csrf created
    secret/kubernetes-dashboard-key-holder created
    configmap/kubernetes-dashboard-settings created
    role.rbac.authorization.k8s.io/kubernetes-dashboard created
    clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
    rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
    clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
    deployment.apps/kubernetes-dashboard created
    service/dashboard-metrics-scraper created
    deployment.apps/dashboard-metrics-scraper created
    

    Check whether the dashboard installed successfully:

    [root@master ~]# kubectl get pods -n kubernetes-dashboard   # wait a moment for the pods to start
    NAME                                         READY   STATUS    RESTARTS   AGE
    dashboard-metrics-scraper-694557449d-km562   1/1     Running   0          2m26s
    kubernetes-dashboard-5f98bdb684-ssmx9        1/1     Running   0          2m26s
    

    Check the dashboard's front-end Service

    [root@master ~]# kubectl get svc -n kubernetes-dashboard
    NAME                        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
    dashboard-metrics-scraper   ClusterIP   10.107.91.74   <none>        8000/TCP   49s
    kubernetes-dashboard        ClusterIP   10.99.59.88    <none>        443/TCP    50s
    
    Change the Service type to NodePort (a ClusterIP Service is only reachable from inside the cluster):
    kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
         28   type: NodePort
    [root@master ~]# kubectl get svc -n kubernetes-dashboard
    NAME                        TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)         AGE
    dashboard-metrics-scraper   ClusterIP   10.107.91.74   <none>        8000/TCP        11m
    kubernetes-dashboard        NodePort    10.99.59.88    <none>        443:30260/TCP   11m
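
    The same change can be made non-interactively with kubectl patch instead of kubectl edit:

    kubectl patch svc kubernetes-dashboard -n kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'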
    

    Open https://192.168.1.11:30260/#/login (the NodePort mapped above) in Firefox; the self-signed certificate triggers a warning that must be accepted.

    Log in to the dashboard with the default token created by the yaml file

    1) List the secrets in the kubernetes-dashboard namespace
    [root@master ~]# kubectl get secret -n kubernetes-dashboard
    NAME                               TYPE                                  DATA   AGE
    default-token-wzcwc                kubernetes.io/service-account-token   3      16m
    kubernetes-dashboard-certs         Opaque                                0      16m
    kubernetes-dashboard-csrf          Opaque                                1      16m
    kubernetes-dashboard-key-holder    Opaque                                2      16m
    kubernetes-dashboard-token-lqws6   kubernetes.io/service-account-token   3      16m
    
    2) Describe the kubernetes-dashboard-token secret found above to get the token
    [root@master ~]# kubectl describe secret kubernetes-dashboard-token-lqws6 -n kubernetes-dashboard
    Name:         kubernetes-dashboard-token-lqws6
    Namespace:    kubernetes-dashboard
    Labels:       <none>
    Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard
                  kubernetes.io/service-account.uid: 29db23b4-dc7a-42d0-963d-b7e1d2090967
    
    Type:  kubernetes.io/service-account-token
    
    Data
    ====
    ca.crt:     1025 bytes
    namespace:  20 bytes
    token:      eyJhbGciOiJSUzI1NiIsImtpZCI6......
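
    The token can also be extracted in one line instead of copying it out of kubectl describe (a sketch that resolves the secret name from the ServiceAccount):

    kubectl -n kubernetes-dashboard get secret \
      $(kubectl -n kubernetes-dashboard get sa kubernetes-dashboard -o jsonpath='{.secrets[0].name}') \
      -o jsonpath='{.data.token}' | base64 -d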
    

    Click "Sign in" to log in; by default only resources in the default namespace are visible.

    3) Create an admin binding so the token can view every namespace

    [root@master ~]# kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
    

    Refresh the page; resources in every namespace can now be viewed and managed.

    The dashboard provides a degree of visualization and maintenance, but resource manifests still have to be written by hand.

    Install the metrics plugin

    [root@master ~]# kubectl top nodes
    error: Metrics API not available
    docker load -i metrics-server-amd64_0_3_1.tar.gz; docker load -i addon.tar.gz
    [root@master ~]# kubectl apply -f metrics.yaml
    [root@master ~]# kubectl get pod -n kube-system -owide
    metrics-server-8459f8db8c-r6mrz    2/2     Running   0          43s    10.244.3.4     node1   <none>   <none>
    [root@master ~]#  kubectl top nodes
    NAME     CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
    master   225m         11%    1220Mi          70%
    node1    97m          4%     671Mi           39%
    [root@master ~]# kubectl top pods -n kube-system
    NAME                               CPU(cores)   MEMORY(bytes)
    calico-node-5rk6g                  21m          29Mi
    coredns-66bff467f8-hnwkm           3m           10Mi
    etcd-master                        57m          89Mi
    If the metrics-server pod above (metrics-server-8459f8db8c-r6mrz) is in the Running state, the metrics-server component deployed successfully.
    

    For how to create a kubeconfig file, see the earlier article:
    https://blog.csdn.net/weixin_60092693/article/details/122521981
