Online VM installation: installing and configuring KubeEdge for cloud-native edge computing (using a LoadBalancer)


    Where to obtain all materials used during the installation

    1. Prepare a K8s cluster

    We deploy Kubernetes 1.22 or 1.23 here. Based on the versions KubeEdge currently supports, 1.22 is chosen; newer Kubernetes versions will be supported later.

    Deployment environment and components:

    Hostname        IP address         Node type        OS version
    k8s-ke-cloud    192.168.186.128    master, etcd     CentOS 7.9
    k8s-ke-edge1    192.168.186.129    worker           CentOS 7.9
    ke-edge1        192.168.186.130    agent, edge      CentOS 7.9
    A successful installation ends up looking like this:
    [root@k8s-ke-cloud ~]# kubectl get nodes
    NAME           STATUS   ROLES                  AGE     VERSION
    k8s-ke-cloud   Ready    control-plane,master   4h19m   v1.22.17
    k8s-ke-edge1   Ready    <none>                 4h11m   v1.22.17
    ke-edge1       Ready    agent,edge             65m     v1.23.15-kubeedge-v1.13.1
    [root@k8s-ke-cloud ~]# 
    
    #Set the hostname (run the matching line on each node):
    hostnamectl set-hostname k8s-ke-cloud  && bash
    hostnamectl set-hostname k8s-ke-edge1  && bash
    hostnamectl set-hostname ke-edge1  && bash
    
    #Disable the firewall:
    systemctl stop firewalld
    systemctl disable firewalld
    #Disable SELinux:
    sed -i 's/enforcing/disabled/' /etc/selinux/config # permanent
    setenforce 0 # temporary
    #Disable swap:
    swapoff -a # temporary
    sed -i 's/.*swap.*/#&/' /etc/fstab # permanent
    
    #Add hosts entries:
    cat >> /etc/hosts << EOF
    192.168.186.128  k8s-ke-cloud
    192.168.186.129  k8s-ke-edge1
    192.168.186.130  ke-edge1
    EOF
    
    #Configure yum repositories
    yum -y install wget
    wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
    wget -O /etc/yum.repos.d/epel.repo https://mirrors.aliyun.com/repo/epel-7.repo
    
    #Time synchronization
    yum install -y chrony
    cat > /etc/chrony.conf << EOF
    server ntp.aliyun.com iburst
    stratumweight 0
    driftfile /var/lib/chrony/drift
    rtcsync
    makestep 10 3
    bindcmdaddress 127.0.0.1
    bindcmdaddress ::1
    keyfile /etc/chrony.keys
    commandkey 1
    generatecommandkey
    logchange 0.5
    logdir /var/log/chrony
    EOF
    
    systemctl enable --now chronyd 
    chronyc sources 
    
    #Load the IPVS kernel modules
    cat > /etc/modprobe.d/k8s.conf <<EOF
    #!/bin/bash
    #modprobe -- br_netfilter
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    
    chmod 755 /etc/modprobe.d/k8s.conf
    bash /etc/modprobe.d/k8s.conf
    lsmod | grep -E "ip_vs|nf_conntrack_ipv4"
    
    #Configure kernel parameters (bridge traffic filtering and IP forwarding)
    cat > /etc/sysctl.conf << EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.bridge.bridge-nf-call-arptables = 1
    net.ipv4.ip_forward=1
    net.ipv4.ip_forward_use_pmtu = 0
    EOF
    
    sysctl --system
    sysctl -a|grep "ip_forward"
    
    #Install Docker
    yum -y install epel-release wget
    wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
    yum -y install docker-ce-20.10.0-3.el7  #pin a specific version
    #yum -y install docker-ce  #or install the latest Docker
    systemctl enable docker && systemctl start docker && systemctl status docker
    
    #Configure registry mirrors
    cat > /etc/docker/daemon.json << EOF
    {
    "registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","http://qtid6917.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
      "exec-opts": ["native.cgroupdriver=systemd"]
    } 
    EOF
    
    systemctl restart docker &&  systemctl status docker
    docker --version
    
    #Add the Kubernetes yum repository
    cat > /etc/yum.repos.d/kubernetes.repo << EOF
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
    https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    
    #Install kubeadm, kubelet and kubectl
    yum clean all && yum makecache -y
    #yum list kubectl --showduplicates | sort -r #list available kubectl versions
    
    yum install -y kubelet-1.22.17-0 kubeadm-1.22.17-0 kubectl-1.22.17-0
    systemctl enable kubelet
    
    kubeadm init --apiserver-advertise-address=192.168.186.128  --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.22.17 --service-cidr=10.96.0.0/12  --pod-network-cidr=10.244.0.0/16 
    
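    The kubeadm init output ends with a kubeadm join command; run it on k8s-ke-edge1 to add the worker node. If that output was lost, the join command can be regenerated on the master (standard kubeadm; the token and hash below are placeholders):
    kubeadm token create --print-join-command
    # then run the printed command on k8s-ke-edge1, e.g.:
    # kubeadm join 192.168.186.128:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
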
    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
    
    kubectl apply -f  calico.yaml  #this file is provided separately
    
    #Deploy metrics-server
    wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
    kubectl apply -f components.yaml
    kubectl get pods -n kube-system
    
    #Patch the Deployment so metrics-server skips kubelet certificate verification
    kubectl patch deploy metrics-server -n kube-system --type='json' -p='[{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--kubelet-insecure-tls"}]'
    
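    Once the patched metrics-server pod is Running, a quick sanity check (a standard kubectl subcommand backed by metrics-server):
    kubectl top nodes
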
    # Create an Nginx deployment in the Kubernetes cluster
    kubectl create deployment nginx --image=nginx 
    kubectl expose deployment nginx --port=80 --type=NodePort
    kubectl scale deployment  nginx --replicas=2  #scale the pod replicas to 2
    kubectl get pod,svc -o wide
    
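    Before deleting, the NodePort can also be checked from the command line (a small sketch; it reads the allocated NodePort instead of hard-coding it, and any node IP works):
    NODEPORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
    curl -I http://192.168.186.128:${NODEPORT}
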
    #Delete once the test succeeds
    kubectl delete deployment,svc nginx
    kubectl get pod,svc,deployment
    

    Open http://<any-node-IP>:<NodePort> in a browser; if the page loads, the cluster has been installed successfully.

    2. Configure MetalLB

    Because cloudcore and edgecore need an address to communicate over, it is recommended to use a load balancer to give cloudcore either a public IP or an IP on the same subnet as the cluster nodes. In production a public IP is used.
    kubectl edit configmap -n kube-system kube-proxy
    
    #Modify the config: set mode to ipvs and strictARP to true
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: "ipvs"
    ipvs:
      strictARP: true
    
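    Instead of editing interactively, the MetalLB installation docs offer a non-interactive one-liner to flip strictARP; shown here for reference:
    kubectl get configmap kube-proxy -n kube-system -o yaml | \
      sed -e "s/strictARP: false/strictARP: true/" | \
      kubectl apply -f - -n kube-system
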
    kubectl rollout restart daemonset kube-proxy -n kube-system  #reload the config and restart kube-proxy
    kubectl get pods -n kube-system
    
    wget https://raw.githubusercontent.com/metallb/metallb/v0.13.5/config/manifests/metallb-native.yaml
    kubectl apply -f metallb-native.yaml
    kubectl get pods -n metallb-system
    
    [root@k8s-ke-cloud ~]# kubectl get pods -n metallb-system
    NAME                          READY   STATUS    RESTARTS   AGE
    controller-6846c94466-w6v8v   1/1     Running   0          37s
    speaker-59hr4                 1/1     Running   0          37s
    speaker-g5rv5                 1/1     Running   0          36s
    [root@k8s-ke-cloud ~]# 
    
    #Configure the address pool: give MetalLB control of the IPs from 192.168.186.240 to 192.168.186.250, to be advertised in Layer 2 mode
    cat > first-ipaddresspool.yaml << EOF
    apiVersion: metallb.io/v1beta1
    kind: IPAddressPool
    metadata:
      name: first-pool
      namespace: metallb-system
    spec:
      addresses:
      - 192.168.186.240-192.168.186.250
    EOF
    
    kubectl apply -f first-ipaddresspool.yaml  #apply
    kubectl get ipaddresspool -n metallb-system  #verify
    
    [root@k8s-ke-cloud ~]# kubectl get ipaddresspool -n metallb-system  #verify
    NAME         AGE
    first-pool   56s
    [root@k8s-ke-cloud ~]# 
    
    cat >  l2-forward.yaml << EOF
    apiVersion: metallb.io/v1beta1
    kind: L2Advertisement
    metadata:
      name: example
      namespace: metallb-system
    EOF
    
    kubectl apply -f l2-forward.yaml
    kubectl get l2advertisements -n metallb-system
    
    [root@k8s-ke-cloud ~]# kubectl get l2advertisements -n metallb-system
    NAME      AGE
    example   112s
    [root@k8s-ke-cloud ~]# 
    
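    With an empty spec, this L2Advertisement announces every address pool in the namespace. To tie it to the pool created above, the pool can be named explicitly (optional; a standard MetalLB v1beta1 field):
    spec:
      ipAddressPools:
      - first-pool
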
    #Test using a LoadBalancer Service
    cat > nginx-loadbalancer.yaml << EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-deployment
    spec:
      selector:
        matchLabels:
          app: nginx
      replicas: 2  # adjust as needed
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: nginx
            ports:
            - containerPort: 80
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service
    spec:
      selector:
        app: nginx
      ports:
        - protocol: TCP
          port: 80
          targetPort: 80
      type: LoadBalancer
    EOF
    
    
    kubectl apply -f nginx-loadbalancer.yaml
    kubectl get pods,svc
    
    [root@k8s-ke-cloud ~]# kubectl get pods,svc
    NAME                                    READY   STATUS    RESTARTS   AGE
    pod/nginx-deployment-7848d4b86f-nc7cx   1/1     Running   0          77s
    pod/nginx-deployment-7848d4b86f-prglx   1/1     Running   0          77s
    
    NAME                    TYPE           CLUSTER-IP      EXTERNAL-IP       PORT(S)        AGE
    service/kubernetes      ClusterIP      10.96.0.1       <none>            443/TCP        81m
    service/nginx-service   LoadBalancer   10.106.158.39   192.168.186.240   80:31243/TCP   77s
    [root@k8s-ke-cloud ~]# 
    
    #Delete after testing
    kubectl delete deployment   nginx-deployment
    kubectl delete svc    nginx-service
    

    3. Deploy KubeEdge

    3.1 Deploy cloudcore

    #Get the keadm tool
    wget https://github.com/kubeedge/kubeedge/releases/download/v1.13.1/keadm-v1.13.1-linux-amd64.tar.gz
    
    tar -zxvf keadm-v1.13.1-linux-amd64.tar.gz  #extract
    cp keadm-v1.13.1-linux-amd64/keadm/keadm /usr/local/bin/  #install keadm
    keadm version  #verify
    
    #Deploy cloudcore
    keadm init --advertise-address=192.168.186.240 --set iptablesManager.mode="external" --profile version=v1.13.1 
    
    [root@k8s-ke-cloud ~]# keadm init --advertise-address=192.168.186.240 --set iptablesManager.mode="external" --profile version=v1.13.1 
    Failed to get the latest KubeEdge release version, error:  failed to get latest version from https://kubeedge.io/latestversion: Get "https://kubeedge.io/latestversion": x509: certificate has expired or is not yet valid: current time 2023-05-27T12:39:57+08:00 is before 2023-10-07T01:12:26Z
    Failed to get the latest KubeEdge release version, error:  failed to get latest version from https://kubeedge.io/latestversion: Get "https://kubeedge.io/latestversion": x509: certificate has expired or is not yet valid: current time 2023-05-27T12:39:59+08:00 is before 2023-10-07T01:12:26Z
    Failed to get the latest KubeEdge release version, error:  failed to get latest version from https://kubeedge.io/latestversion: Get "https://kubeedge.io/latestversion": x509: certificate has expired or is not yet valid: current time 2023-05-27T12:39:59+08:00 is before 2023-10-07T01:12:26Z
    Failed to get the latest KubeEdge release version, error:  failed to get latest version from https://kubeedge.io/latestversion: Get "https://kubeedge.io/latestversion": x509: certificate has expired or is not yet valid: current time 2023-05-27T12:39:59+08:00 is before 2023-10-07T01:12:26Z
    Failed to get the latest KubeEdge release version, error:  failed to get latest version from https://kubeedge.io/latestversion: Get "https://kubeedge.io/latestversion": x509: certificate has expired or is not yet valid: current time 2023-05-27T12:39:59+08:00 is before 2023-10-07T01:12:26Z
    Failed to get the latest KubeEdge release version, will use default version:  1.12.0
    Kubernetes version verification passed, KubeEdge installation will start...
    CLOUDCORE started
    =========CHART DETAILS=======
    NAME: cloudcore
    LAST DEPLOYED: Sat May 27 12:40:02 2023
    NAMESPACE: kubeedge
    STATUS: deployed
    REVISION: 1
    [root@k8s-ke-cloud ~]# 
    
    keadm gettoken #print the join token; save it
    kubectl get ns
    kubectl get pod,svc  -n kubeedge
    
    [root@k8s-ke-cloud ~]# ps -ef|grep cloudcore  #verify the cloudcore process is running; otherwise edge nodes cannot join the cluster
    root      2447  8609  0 09:54 pts/0    00:00:00 grep --color=auto cloudcore
    root      9990  9943  0 09:09 ?        00:00:01 cloudcore
    [root@k8s-ke-cloud ~]# netstat -anltp #check listening ports
    
    
    [root@k8s-ke-cloud ~]# keadm gettoken #print the join token; save it
    f28b7497c541833c1d7f8ebcdf4532ff82d297a75fb879bb6b1d793c06f1137a.eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODUyNDgzNzB9.LzeoF_7GVgajogyp_8k8co37u6OC8NCxGWb3E8OzNUU
    [root@k8s-ke-cloud ~]# 
    
    [root@k8s-ke-cloud ~]# kubectl get pod,svc  -n kubeedge
    NAME                               READY   STATUS    RESTARTS   AGE
    pod/cloud-iptables-manager-72l9c   1/1     Running   0          80s
    pod/cloudcore-5475cc4b46-xqj9m     1/1     Running   0          80s
    
    NAME                TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                                             AGE
    service/cloudcore   ClusterIP   10.107.192.156   <none>        10000/TCP,10001/TCP,10002/TCP,10003/TCP,10004/TCP   81s
    [root@k8s-ke-cloud ~]# 
    
    #Change the cloudcore Service type to LoadBalancer
    kubectl edit svc cloudcore -n kubeedge
    
    #What to change
      selector:
        k8s-app: kubeedge
        kubeedge: cloudcore
      sessionAffinity: None
      type: LoadBalancer   #change this line to LoadBalancer
    status:
      loadBalancer: {}
    
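    A non-interactive equivalent of the edit above (a plain kubectl patch):
    kubectl patch svc cloudcore -n kubeedge -p '{"spec":{"type":"LoadBalancer"}}'
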
    [root@k8s-ke-cloud ~]# kubectl get  svc  -n kubeedge  #after the change, the Service picks up the first address from the pool
    NAME        TYPE           CLUSTER-IP       EXTERNAL-IP       PORT(S)                                                                           AGE
    cloudcore   LoadBalancer   10.107.192.156   192.168.186.240   10000:30718/TCP,10001:30622/TCP,10002:30083/TCP,10003:30798/TCP,10004:31650/TCP   4m45s
    [root@k8s-ke-cloud ~]# 
    
    [root@k8s-ke-cloud ~]# kubectl get  pods,svc,deployment  -n kubeedge  #everything looks healthy
    NAME                               READY   STATUS    RESTARTS   AGE
    pod/cloud-iptables-manager-72l9c   1/1     Running   0          5m49s
    pod/cloudcore-5475cc4b46-xqj9m     1/1     Running   0          5m49s
    
    NAME                TYPE           CLUSTER-IP       EXTERNAL-IP       PORT(S)                                                                           AGE
    service/cloudcore   LoadBalancer   10.107.192.156   192.168.186.240   10000:30718/TCP,10001:30622/TCP,10002:30083/TCP,10003:30798/TCP,10004:31650/TCP   5m50s
    
    NAME                        READY   UP-TO-DATE   AVAILABLE   AGE
    deployment.apps/cloudcore   1/1     1            1           5m49s
    [root@k8s-ke-cloud ~]# 
    

    Because edge hardware is usually resource-constrained, we add scheduling rules here so that certain workloads never spread to the edge node.

    No DaemonSet should consume the edge node's resources, so every DaemonSet in kube-system and metallb-system is patched with a node-affinity rule that excludes nodes carrying the node-role.kubernetes.io/edge label (a verification sketch follows the two patch commands below):

    kubectl get daemonset -n kube-system |grep -v NAME |awk '{print $1}' | xargs -n 1 kubectl patch daemonset -n kube-system --type='json' -p='[{"op": "replace","path": "/spec/template/spec/affinity","value":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"node-role.kubernetes.io/edge","operator":"DoesNotExist"}]}]}}}}]'
    
    kubectl get daemonset -n metallb-system |grep -v NAME |awk '{print $1}' | xargs -n 1 kubectl patch daemonset -n metallb-system --type='json' -p='[{"op": "replace","path": "/spec/template/spec/affinity","value":{"nodeAffinity":{"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[{"matchExpressions":[{"key":"node-role.kubernetes.io/edge","operator":"DoesNotExist"}]}]}}}}]'
    
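    Later, once the edge node has joined, it is worth confirming that no DaemonSet pods land on it (ke-edge1 is the edge hostname from the table above):
    kubectl get pods -A -o wide | grep ke-edge1
    # expect no kube-proxy / calico / metallb speaker pods on the edge node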

    4. Deploy edgecore

    # Install Docker (on the edge node)
    yum install -y yum-utils device-mapper-persistent-data lvm2
    yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
    yum makecache fast
    yum -y install docker-ce-20.10.0-3.el7   #pin this version
    
    # Generate the Docker config file
    mkdir -p /etc/docker/
    touch /etc/docker/daemon.json
    cat > /etc/docker/daemon.json << EOF
    {
    "registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","http://qtid6917.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
       "exec-opts": ["native.cgroupdriver=cgroupfs"]
    }
    EOF
    
    # Start Docker
    systemctl enable docker --now
    systemctl start docker
    systemctl status docker
    docker --version
    # Note: cgroupdriver=cgroupfs here. Changing it to systemd causes a cgroup-driver mismatch between Docker and edgecore.
    
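    To confirm which cgroup driver Docker actually ended up with (a standard docker command):
    docker info | grep -i 'cgroup driver'
    # expect: Cgroup Driver: cgroupfs
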
    #Get the keadm tool
    wget https://github.com/kubeedge/kubeedge/releases/download/v1.13.1/keadm-v1.13.1-linux-amd64.tar.gz
    
    tar -zxvf keadm-v1.13.1-linux-amd64.tar.gz  #extract
    cp keadm-v1.13.1-linux-amd64/keadm/keadm /usr/local/bin/  #install keadm
    keadm version  #verify
    
    #Load the offline image tarballs (provided separately)
    docker load -i coreedge.tar && docker load -i image.tar && docker load -i kubeedge.tar && docker load -i metallb_image.tar && docker load -i metrics-server.tar && docker load -i nginx.tar
    
    #Note the token, the IP, and the port (the port, 10000, is fixed)
    keadm join --token=f28b7497c541833c1d7f8ebcdf4532ff82d297a75fb879bb6b1d793c06f1137a.eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODUyNDgzNzB9.LzeoF_7GVgajogyp_8k8co37u6OC8NCxGWb3E8OzNUU   --cloudcore-ipport=192.168.186.240:10000  --kubeedge-version=v1.13.1
    

    Running this fails with the following error:

    [root@ke-edge1 ~]# keadm join --token=f28b7497c541833c1d7f8ebcdf4532ff82d297a75fb879bb6b1d793c06f1137a.eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODUyNDgzNzB9.LzeoF_7GVgajogyp_8k8co37u6OC8NCxGWb3E8OzNUU   --cloudcore-ipport=192.168.186.240:10000  --kubeedge-version=v1.13.1
    I0527 00:54:53.977429   23811 command.go:845] 1. Check KubeEdge edgecore process status
    I0527 00:54:54.483729   23811 command.go:845] 2. Check if the management directory is clean
    I0527 00:54:54.605584   23811 join.go:107] 3. Create the necessary directories
    I0527 00:54:55.208656   23811 join.go:184] 4. Pull Images
    Pulling kubeedge/installation-package:v1.13.1 ...
    E0527 00:54:55.209959   23811 remote_image.go:160] "Get ImageStatus from image service failed" err="rpc error: code = Unimplemented desc = unknown service runtime.v1alpha2.ImageService" image="kubeedge/installation-package:v1.13.1"
    Error: edge node join failed: pull Images failed: rpc error: code = Unimplemented desc = unknown service runtime.v1alpha2.ImageService
    execute keadm command failed:  edge node join failed: pull Images failed: rpc error: code = Unimplemented desc = unknown service runtime.v1alpha2.ImageService
    [root@ke-edge1 ~]#
    

    Fix:

    rm -rf /etc/containerd/config.toml
    containerd config default > /etc/containerd/config.toml
    systemctl restart containerd
    

    Note the added parameter:

     keadm join --token=f28b7497c541833c1d7f8ebcdf4532ff82d297a75fb879bb6b1d793c06f1137a.eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODUyNDgzNzB9.LzeoF_7GVgajogyp_8k8co37u6OC8NCxGWb3E8OzNUU   --cloudcore-ipport=192.168.186.240:10000  --kubeedge-version=v1.13.1 --runtimetype=docker
    

    KubeEdge 1.13 uses containerd by default. To use Docker instead, the runtime type and the remote runtime endpoint should both be specified at keadm join time; see the hedged example below.
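
    A sketch of a join specifying both flags (the socket path is an assumption based on the legacy dockershim default, not taken from this walkthrough; in practice --runtimetype=docker alone was sufficient here, as the transcript below shows):
    keadm join --token=<token> \
      --cloudcore-ipport=192.168.186.240:10000 \
      --kubeedge-version=v1.13.1 \
      --runtimetype=docker \
      --remote-runtime-endpoint=unix:///var/run/dockershim.sock   # assumed socket path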

    [root@ke-edge1 ~]# rm -fr /etc/kubeedge/
    [root@ke-edge1 ~]# keadm join --token=f28b7497c541833c1d7f8ebcdf4532ff82d297a75fb879bb6b1d793c06f1137a.eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2ODUyNDgzNzB9.LzeoF_7GVgajogyp_8k8co37u6OC8NCxGWb3E8OzNUU   --cloudcore-ipport=192.168.186.240:10000  --kubeedge-version=v1.13.1 --runtimetype=docker
    I0527 00:58:27.012345   24169 command.go:845] 1. Check KubeEdge edgecore process status
    I0527 00:58:27.022045   24169 command.go:845] 2. Check if the management directory is clean
    I0527 00:58:27.022111   24169 join.go:107] 3. Create the necessary directories
    I0527 00:58:27.024162   24169 join.go:184] 4. Pull Images
    Pulling kubeedge/installation-package:v1.13.1 ...
    Pulling eclipse-mosquitto:1.6.15 ...
    Pulling kubeedge/pause:3.6 ...
    I0527 00:58:27.035700   24169 join.go:184] 5. Copy resources from the image to the management directory
    I0527 00:58:34.218860   24169 join.go:184] 6. Start the default mqtt service
    I0527 00:58:34.224341   24169 join.go:107] 7. Generate systemd service file
    I0527 00:58:34.239674   24169 join.go:107] 8. Generate EdgeCore default configuration
    I0527 00:58:34.239737   24169 join.go:270] The configuration does not exist or the parsing fails, and the default configuration is generated
    W0527 00:58:34.247643   24169 validation.go:71] NodeIP is empty , use default ip which can connect to cloud.
    I0527 00:58:34.270597   24169 join.go:107] 9. Run EdgeCore daemon
    I0527 00:58:39.891427   24169 join.go:435] 
    I0527 00:58:39.891484   24169 join.go:436] KubeEdge edgecore is running, For logs visit: journalctl -u edgecore.service -xe
    [root@ke-edge1 ~]# 
    
    [root@ke-edge1 ~]# systemctl status edgecore #check that the service is healthy
    ● edgecore.service
       Loaded: loaded (/etc/systemd/system/edgecore.service; enabled; vendor preset: disabled)
       Active: active (running) since Sat 2023-05-27 00:58:37 EDT; 1min 57s ago
     Main PID: 24314 (edgecore)
        Tasks: 15
       Memory: 29.0M
       CGroup: /system.slice/edgecore.service
               └─24314 /usr/local/bin/edgecore
    
    May 27 00:59:05 ke-edge1 edgecore[24314]: I0527 00:59:05.980371   24314 client.go:89] edge-hub-cli subscribe topic to $hw/events/upload/#
    May 27 00:59:05 ke-edge1 edgecore[24314]: I0527 00:59:05.980494   24314 client.go:153] finish hub-client pub
    May 27 00:59:05 ke-edge1 edgecore[24314]: I0527 00:59:05.980510   24314 eventbus.go:71] Init Sub And Pub Client for external mqtt broker tcp://127.0.0.1:...ccessfully
    May 27 00:59:05 ke-edge1 edgecore[24314]: W0527 00:59:05.980528   24314 eventbus.go:168] Action not found
    May 27 00:59:05 ke-edge1 edgecore[24314]: I0527 00:59:05.980678   24314 client.go:89] edge-hub-cli subscribe topic to $hw/events/device/+/state/update
    May 27 00:59:05 ke-edge1 edgecore[24314]: I0527 00:59:05.981141   24314 client.go:89] edge-hub-cli subscribe topic to $hw/events/device/+/twin/+
    May 27 00:59:05 ke-edge1 edgecore[24314]: I0527 00:59:05.981834   24314 client.go:89] edge-hub-cli subscribe topic to $hw/events/node/+/membership/get
    May 27 00:59:05 ke-edge1 edgecore[24314]: I0527 00:59:05.983192   24314 client.go:89] edge-hub-cli subscribe topic to SYS/dis/upload_records
    May 27 00:59:05 ke-edge1 edgecore[24314]: I0527 00:59:05.983936   24314 client.go:89] edge-hub-cli subscribe topic to +/user/#
    May 27 00:59:05 ke-edge1 edgecore[24314]: I0527 00:59:05.984966   24314 client.go:97] list edge-hub-cli-topics status, no record, skip sync
    Hint: Some lines were ellipsized, use -l to show in full.
    [root@ke-edge1 kubeedge]# ll -ah  #the files in this directory were generated correctly
    total 12K
    drwxr-xr-x.  5 root root   59 May 27 00:58 .
    drwxr-xr-x. 78 root root 8.0K May 27 00:58 ..
    drwxr-xr-x.  2 root root   24 May 27 00:58 ca
    drwxr-xr-x.  2 root root   42 May 27 00:58 certs
    drwxr-xr-x.  2 root root   27 May 27 00:58 config
    srwxr-xr-x.  1 root root    0 May 27 00:58 dmi.sock
    [root@ke-edge1 kubeedge]# cd config/
    [root@ke-edge1 config]# ll
    total 8
    -rw-r--r--. 1 root root 4940 May 27 00:58 edgecore.yaml
    [root@ke-edge1 config]# 
    
    [root@k8s-ke-cloud ~]# kubectl get nodes  #the edge node has joined
    NAME           STATUS   ROLES                  AGE     VERSION
    k8s-ke-cloud   Ready    control-plane,master   3h18m   v1.22.17
    k8s-ke-edge1   Ready    <none>                 3h11m   v1.22.17
    ke-edge1       Ready    agent,edge             4m19s   v1.23.15-kubeedge-v1.13.1
    [root@k8s-ke-cloud ~]# 
    

    5. Test running pods on the edge node

    #Run on the master
    cat > nginx.yaml << EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx-deployment
    spec:
      selector:
        matchLabels:
          app: nginx
      replicas: 3  # adjust as needed
      template:
        metadata:
          labels:
            app: nginx
        spec:
          nodeName: ke-edge1  #in a multi-node cluster, pin scheduling to the edge node
          containers:
          - name: nginx
            image: nginx
            ports:
            - containerPort: 80
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-service
    spec:
      selector:
        app: nginx
      ports:
        - protocol: TCP
          port: 80
          targetPort: 80
      type: LoadBalancer
    EOF
    
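    Instead of hard-coding nodeName, the pod template could target any edge node via the role label that KubeEdge applies (a sketch; assumes the node-role.kubernetes.io/edge label visible in the kubectl get nodes output above). Replace the nodeName line with:
          nodeSelector:
            node-role.kubernetes.io/edge: ""
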
    kubectl apply -f nginx.yaml
    kubectl get pods,svc -o wide
    
    [root@k8s-ke-cloud ~]# kubectl get pods,svc -o wide
    NAME                                    READY   STATUS    RESTARTS   AGE    IP           NODE       NOMINATED NODE   READINESS GATES
    pod/nginx-deployment-6bd7b664fd-p8xcb   1/1     Running   0          115s   172.17.0.4   ke-edge1   <none>           <none>
    pod/nginx-deployment-6bd7b664fd-tr8lk   1/1     Running   0          115s   172.17.0.5   ke-edge1   <none>           <none>
    pod/nginx-deployment-6bd7b664fd-wxc8p   1/1     Running   0          115s   172.17.0.3   ke-edge1   <none>           <none>
    
    NAME                    TYPE           CLUSTER-IP       EXTERNAL-IP       PORT(S)        AGE     SELECTOR
    service/kubernetes      ClusterIP      10.96.0.1        <none>            443/TCP        3h51m   <none>
    service/nginx-service   LoadBalancer   10.105.176.182   192.168.186.241   80:31667/TCP   115s    app=nginx
    [root@k8s-ke-cloud ~]# 
    
    [root@k8s-ke-cloud ~]# kubectl get pods
    NAME                                READY   STATUS    RESTARTS   AGE
    nginx-deployment-6bd7b664fd-p8xcb   1/1     Running   0          6m39s
    nginx-deployment-6bd7b664fd-tr8lk   1/1     Running   0          6m39s
    nginx-deployment-6bd7b664fd-wxc8p   1/1     Running   0          6m39s
    [root@k8s-ke-cloud ~]# kubectl logs nginx-deployment-6bd7b664fd-p8xcb
    Error from server: Get "https://192.168.186.130:10350/containerLogs/default/nginx-deployment-6bd7b664fd-p8xcb/nginx": dial tcp 192.168.186.130:10350: connect: connection refused
    [root@k8s-ke-cloud ~]# iptables -t nat -A OUTPUT -p tcp --dport 10350 -j DNAT --to 192.168.186.240:10003 
    [root@k8s-ke-cloud ~]# iptables -t nat -A OUTPUT -p tcp --dport 10351 -j DNAT --to 192.168.186.240:10003 
    [root@k8s-ke-cloud ~]# kubectl logs nginx-deployment-6bd7b664fd-p8xcb
    Error from server (InternalError): Internal error occurred: 
    
    
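    These two OUTPUT rules do not survive a reboot; on CentOS 7 they can be persisted (assuming the iptables-services package):
    yum install -y iptables-services
    service iptables save   # writes the current rules to /etc/sysconfig/iptables
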
    [root@ke-edge1 ~]# vim /etc/kubeedge/config/edgecore.yaml  #set enable under edgeStream to true, as shown
     edgeStream:
        enable: true   #change only this line
        handshakeTimeout: 30
        readDeadline: 15
    
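    edgecore.yaml contains several other enable fields, so a blanket sed would be unsafe; a range-limited sed that only touches the edgeStream block (a hedged sketch, assuming the key order shown above):
    sed -i '/edgeStream:/,/handshakeTimeout:/ s/enable: false/enable: true/' /etc/kubeedge/config/edgecore.yaml
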
    [root@ke-edge1 ~]# systemctl restart edgecore
    [root@ke-edge1 ~]# systemctl status  edgecore
    
    [root@k8s-ke-cloud ~]# kubectl logs nginx-deployment-6bd7b664fd-p8xcb
    /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
    /docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
    10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
    10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
    /docker-entrypoint.sh: Sourcing /docker-entrypoint.d/15-local-resolvers.envsh
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
    /docker-entrypoint.sh: Configuration complete; ready for start up
    2023/05/27 05:34:40 [notice] 1#1: using the "epoll" event method
    2023/05/27 05:34:40 [notice] 1#1: nginx/1.25.2
    2023/05/27 05:34:40 [notice] 1#1: built by gcc 12.2.0 (Debian 12.2.0-14) 
    2023/05/27 05:34:40 [notice] 1#1: OS: Linux 3.10.0-1160.el7.x86_64
    2023/05/27 05:34:40 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576
    2023/05/27 05:34:40 [notice] 1#1: start worker processes
    2023/05/27 05:34:40 [notice] 1#1: start worker process 30
    2023/05/27 05:34:40 [notice] 1#1: start worker process 31
    2023/05/27 05:34:40 [notice] 1#1: start worker process 32
    2023/05/27 05:34:40 [notice] 1#1: start worker process 33
    [root@k8s-ke-cloud ~]# 
    
    [root@ke-edge1 ~]# docker ps -a
    CONTAINER ID   IMAGE                COMMAND                  CREATED          STATUS          PORTS                                            NAMES
    b78081847f20   nginx                "/docker-entrypoint.…"   12 minutes ago   Up 12 minutes                                                    k8s_nginx_nginx-deployment-6bd7b664fd-tr8lk_default_814d1540-7c75-40f9-9351-4081bde1e734_0
    b349fb74b005   kubeedge/pause:3.6   "/pause"                 13 minutes ago   Up 13 minutes                                                    k8s_POD_nginx-deployment-6bd7b664fd-tr8lk_default_814d1540-7c75-40f9-9351-4081bde1e734_0
    d5e5c3f70530   nginx                "/docker-entrypoint.…"   13 minutes ago   Up 13 minutes                                                    k8s_nginx_nginx-deployment-6bd7b664fd-wxc8p_default_ce3b30a9-71aa-44c1-af57-994f6f47627d_0
    183b213b78ea   nginx                "/docker-entrypoint.…"   13 minutes ago   Up 13 minutes                                                    k8s_nginx_nginx-deployment-6bd7b664fd-p8xcb_default_9166c2b8-63d2-43ad-b34c-e8a7b8bd8773_0
    8c587176a71e   kubeedge/pause:3.6   "/pause"                 14 minutes ago   Up 14 minutes                                                    k8s_POD_nginx-deployment-6bd7b664fd-p8xcb_default_9166c2b8-63d2-43ad-b34c-e8a7b8bd8773_0
    1b1643dd64a2   kubeedge/pause:3.6   "/pause"                 14 minutes ago   Up 14 minutes                                                    k8s_POD_nginx-deployment-6bd7b664fd-wxc8p_default_ce3b30a9-71aa-44c1-af57-994f6f47627d_0
    6138bea41aa3   5dade4ce550b         "/docker-entrypoint.…"   49 minutes ago   Up 49 minutes                                                    k8s_mqtt_mqtt-kubeedge_default_32597cea-87d8-4137-bb4d-0fa08804b88d_0
    77a02d19d08e   kubeedge/pause:3.6   "/pause"                 49 minutes ago   Up 49 minutes   0.0.0.0:1883->1883/tcp, 0.0.0.0:9001->9001/tcp   k8s_POD_mqtt-kubeedge_default_32597cea-87d8-4137-bb4d-0fa08804b88d_0
    [root@ke-edge1 ~]# docker logs b78081847f20
    /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
    /docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
    10-listen-on-ipv6-by-default.sh: info: Getting the checksum of /etc/nginx/conf.d/default.conf
    10-listen-on-ipv6-by-default.sh: info: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
    /docker-entrypoint.sh: Sourcing /docker-entrypoint.d/15-local-resolvers.envsh
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
    /docker-entrypoint.sh: Launching /docker-entrypoint.d/30-tune-worker-processes.sh
    /docker-entrypoint.sh: Configuration complete; ready for start up
    2023/05/27 05:35:21 [notice] 1#1: using the "epoll" event method
    2023/05/27 05:35:21 [notice] 1#1: nginx/1.25.2
    2023/05/27 05:35:21 [notice] 1#1: built by gcc 12.2.0 (Debian 12.2.0-14) 
    2023/05/27 05:35:21 [notice] 1#1: OS: Linux 3.10.0-1160.el7.x86_64
    2023/05/27 05:35:21 [notice] 1#1: getrlimit(RLIMIT_NOFILE): 1048576:1048576
    2023/05/27 05:35:21 [notice] 1#1: start worker processes
    2023/05/27 05:35:21 [notice] 1#1: start worker process 29
    2023/05/27 05:35:21 [notice] 1#1: start worker process 30
    2023/05/27 05:35:21 [notice] 1#1: start worker process 31
    2023/05/27 05:35:21 [notice] 1#1: start worker process 32
    [root@ke-edge1 ~]#
    

    With the test passing, the deployment is complete.

  Original article: https://blog.csdn.net/qq_14910065/article/details/133957835