KubeSphere Multi-Node Installation


    Host inventory

    No.  IP              Hostname    Spec
    1    192.168.85.161  k8s-master  8 vCPU, 32 GB
    2    192.168.85.162  k8s-node1   8 vCPU, 32 GB
    3    192.168.85.163  k8s-node2   8 vCPU, 32 GB
    4    192.168.85.164  nfs-server  8 vCPU, 32 GB

    Basic environment preparation

    NFS server installation and deployment (omitted here)

    1. Install Docker

    curl -sSL https://get.daocloud.io/docker | sh
    # Configure a registry mirror to speed up image pulls
    curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
    
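    Optionally, confirm the Docker daemon is enabled and running before continuing (the install script does not always start it):

    systemctl enable --now docker
    docker version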

    2. Install dependencies

    yum -y install socat conntrack ebtables ipset net-tools
    
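    A minimal sanity check, if you want to confirm the tools are now available (note that conntrack and ebtables live under /usr/sbin, which is on root's PATH):

    for bin in socat conntrack ebtables ipset netstat; do
      command -v "$bin" >/dev/null || echo "missing: $bin"
    done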

    3. Configure passwordless SSH between nodes

    # Run on every node
    ssh-keygen
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.85.161
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.85.162
    ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.85.163
    
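    Before running KubeKey, it is worth verifying that passwordless login actually works from the master to every node (BatchMode makes ssh fail instead of prompting for a password):

    for ip in 192.168.85.161 192.168.85.162 192.168.85.163; do
      ssh -o BatchMode=yes root@$ip hostname
    done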

    4. Download KubeKey

    # Run on the master node only
    export KKZONE=cn
    curl -sfL https://get-kk.kubesphere.io | VERSION=v2.0.0 sh -
    
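    The script drops a kk binary into the current directory; make sure it is executable and runs:

    chmod +x kk
    ./kk version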

    Create the cluster

    1. Generate a sample configuration file

    ./kk create config --with-kubesphere
    
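    This writes config-sample.yaml to the current directory; that is the file edited below and applied in step 3. The configuration here pins Kubernetes v1.21.5 and KubeSphere v3.2.1; if you prefer, kk can also pin both versions when generating the file:

    ./kk create config --with-kubernetes v1.21.5 --with-kubesphere v3.2.1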

    2. Edit the configuration file

    Enable the optional components below according to your needs and the resources available on your servers.

    apiVersion: kubekey.kubesphere.io/v1alpha2
    kind: Cluster
    metadata:
      name: sample
    spec:
      hosts:   # host inventory
      - {name: k8s-master, address: 192.168.85.161, internalAddress: 192.168.85.161, privateKeyPath: "~/.ssh/id_rsa"}
      - {name: k8s-node1, address: 192.168.85.162, internalAddress: 192.168.85.162, privateKeyPath: "~/.ssh/id_rsa"}
      - {name: k8s-node2, address: 192.168.85.163, internalAddress: 192.168.85.163, privateKeyPath: "~/.ssh/id_rsa"}
      roleGroups:
        etcd:    # etcd node list
        - k8s-master
        control-plane:    # control plane node list
        - k8s-master
        worker:    # worker node list
        - k8s-node1
        - k8s-node2
      controlPlaneEndpoint:
        ## Internal loadbalancer for apiservers 
        # internalLoadbalancer: haproxy
    
        domain: lb.kubesphere.local
        address: ""
        port: 6443
      kubernetes:
        version: v1.21.5
        clusterName: cluster.local
      network:
        plugin: calico
        kubePodsCIDR: 10.233.64.0/18
        kubeServiceCIDR: 10.233.0.0/18
        multusCNI:
          enabled: false
      registry:
        plainHTTP: false
        privateRegistry: ""
        namespaceOverride: ""
        registryMirrors: []
        insecureRegistries: []
      addons: 
      - name: nfs-client    # use NFS as the persistent storage backend
        namespace: kube-system
        sources: 
          chart: 
            name: nfs-client-provisioner
            repo: https://charts.kubesphere.io/main
            values:
            - storageClass.defaultClass=true
            - nfs.server=192.168.85.164
            - nfs.path=/data
    
    ---
    apiVersion: installer.kubesphere.io/v1alpha1
    kind: ClusterConfiguration
    metadata:
      name: ks-installer
      namespace: kubesphere-system
      labels:
        version: v3.2.1
    spec:
      persistence:
        storageClass: ""
      authentication:
        jwtSecret: ""
      local_registry: ""
      namespace_override: ""
      # dev_tag: ""
      etcd:
        monitoring: true  # enable as needed
        endpointIps: localhost
        port: 2379
        tlsEnable: true
      common:
        core:
          console:
            enableMultiLogin: true
            port: 30880
            type: NodePort
        # apiserver:
        #  resources: {}
        # controllerManager:
        #  resources: {}
        redis:
          enabled: true  # enable as needed
          volumeSize: 2Gi
        openldap:
          enabled: true  # enable as needed
          volumeSize: 2Gi
        minio:
          volumeSize: 20Gi
        monitoring:
          # type: external
          endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
          GPUMonitoring:
            enabled: true  # enable as needed
        gpu:
          kinds:         
          - resourceName: "nvidia.com/gpu"
            resourceType: "GPU"
            default: true
        es:
          # master:
          #   volumeSize: 4Gi
          #   replicas: 1
          #   resources: {}
          # data:
          #   volumeSize: 20Gi
          #   replicas: 1
          #   resources: {}
          logMaxAge: 7
          elkPrefix: logstash
          basicAuth:
            enabled: false  # whether to require authentication
            username: ""
            password: ""
          externalElasticsearchHost: ""
          externalElasticsearchPort: ""
      alerting:
        enabled: true  # enable as needed
        # thanosruler:
        #   replicas: 1
        #   resources: {}
      auditing:
        enabled: true  # enable as needed
        # operator:
        #   resources: {}
        # webhook:
        #   resources: {}
      devops:
        enabled: true  # enable as needed
        jenkinsMemoryLim: 2Gi
        jenkinsMemoryReq: 1500Mi
        jenkinsVolumeSize: 8Gi
        jenkinsJavaOpts_Xms: 512m
        jenkinsJavaOpts_Xmx: 512m
        jenkinsJavaOpts_MaxRAM: 2g
      events:
        enabled: true
        # operator:
        #   resources: {}
        # exporter:
        #   resources: {}
        # ruler:
        #   enabled: true
        #   replicas: 2
        #   resources: {}
      logging:
        enabled: true
        containerruntime: docker
        logsidecar:
          enabled: true
          replicas: 2
          # resources: {}
      metrics_server:
        enabled: true
      monitoring:
        storageClass: ""
        # kube_rbac_proxy:
        #   resources: {}
        # kube_state_metrics:
        #   resources: {}
        # prometheus:
        #   replicas: 1
        #   volumeSize: 20Gi
        #   resources: {}
        #   operator:
        #     resources: {}
        #   adapter:
        #     resources: {}
        # node_exporter:
        #   resources: {}
        # alertmanager:
        #   replicas: 1
        #   resources: {}
        # notification_manager:
        #   resources: {}
        #   operator:
        #     resources: {}
        #   proxy:
        #     resources: {}
        gpu:
          nvidia_dcgm_exporter:
            enabled: true
            # resources: {}
      multicluster:
        clusterRole: none 
      network:
        networkpolicy:
          enabled: true
        ippool:
          type: none
        topology:
          type: none
      openpitrix:
        store:
          enabled: false
      servicemesh:
        enabled: true
      kubeedge:
        enabled: false   
        cloudCore:
          nodeSelector: {"node-role.kubernetes.io/worker": ""}
          tolerations: []
          cloudhubPort: "10000"
          cloudhubQuicPort: "10001"
          cloudhubHttpsPort: "10002"
          cloudstreamPort: "10003"
          tunnelPort: "10004"
          cloudHub:
            advertiseAddress:
              - ""
            nodeLimit: "100"
          service:
            cloudhubNodePort: "30000"
            cloudhubQuicNodePort: "30001"
            cloudhubHttpsNodePort: "30002"
            cloudstreamNodePort: "30003"
            tunnelNodePort: "30004"
        edgeWatcher:
          nodeSelector: {"node-role.kubernetes.io/worker": ""}
          tolerations: []
          edgeWatcherAgent:
            nodeSelector: {"node-role.kubernetes.io/worker": ""}
            tolerations: []
    
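    Note that the nfs-client addon mounts volumes from 192.168.85.164, so every cluster node needs an NFS client installed; on CentOS that is typically:

    yum -y install nfs-utils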

    3. Create the cluster from the configuration file

    ./kk create cluster -f config-sample.yaml
    
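    The installation takes a while. You can follow the ks-installer logs to watch progress (the command used in the KubeSphere documentation):

    kubectl logs -n kubesphere-system \
      $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f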
    4. Verify the installation

    After the installation completes, you will see output like the following:

    #####################################################
    ###              Welcome to KubeSphere!           ###
    #####################################################
    
    Console: http://192.168.85.161:30880
    Account: admin
    Password: P@88w0rd
    
    NOTES:
      1. After you log into the console, please check the
         monitoring status of service components in
         "Cluster Management". If any service is not
         ready, please wait patiently until all components
         are up and running.
      2. Please change the default password after login.
    
    #####################################################
    https://kubesphere.io             2022-07-28 03:10:29
    #####################################################
    
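    Besides logging in to the console, a quick check from the master confirms that all nodes are Ready and the KubeSphere workloads are running:

    kubectl get nodes
    kubectl get pods -A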

    Install OpenELB

    1. Install

    kubectl apply -f https://raw.githubusercontent.com/openelb/openelb/master/deploy/openelb.yaml
    
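    The manifest installs OpenELB into the openelb-system namespace; wait until its pods are Running before moving on:

    kubectl get pods -n openelb-system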

    2. Enable strictARP

    You need to enable strictARP for kube-proxy so that all NICs in the Kubernetes cluster stop responding to ARP requests on behalf of other NICs, letting OpenELB handle those ARP requests instead.

    kubectl edit configmap kube-proxy -n kube-system
    ...
    ipvs:
      strictARP: true
    ...
    
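    If you prefer a non-interactive change (for example in a script), the same edit can be made with sed, assuming strictARP is currently false:

    kubectl get configmap kube-proxy -n kube-system -o yaml \
      | sed -e 's/strictARP: false/strictARP: true/' \
      | kubectl apply -f - -n kube-system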

    Then restart the kube-proxy component with the following command:

    kubectl rollout restart daemonset kube-proxy -n kube-system
    

    3. Configure an EIP pool

    apiVersion: network.kubesphere.io/v1alpha2
    kind: Eip
    metadata:
        name: eip-pool
        annotations:
          eip.openelb.kubesphere.io/is-default-eip: "true"
    spec:
        address: 192.168.85.91-192.168.85.100  # address pool
        protocol: layer2  # protocol: bgp, layer2, or vip
        interface: eth0  # network interface to announce the EIPs on
        disable: false
    
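    Save the manifest (for example as eip-pool.yaml), apply it, and confirm the pool was created:

    kubectl apply -f eip-pool.yaml
    kubectl get eip eip-pool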

    4. Usage

    1. Create a Deployment

    1. Create the YAML file
    vi layer2-openelb.yaml
    
    2. Add the following content
    apiVersion: apps/v1
    kind: Deployment
    metadata:
     name: layer2-openelb
    spec:
     replicas: 2
     selector:
       matchLabels:
         app: layer2-openelb
     template:
       metadata:
         labels:
           app: layer2-openelb
       spec:
         containers:
           - image: luksa/kubia
             name: kubia
             ports:
               - containerPort: 8080
    
    3. Apply the YAML file to create the Deployment
    kubectl apply -f layer2-openelb.yaml
    
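    Wait until both replicas are Running before exposing them:

    kubectl get pods -l app=layer2-openelb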

    2. Create a Service

    1. Create the YAML file
    vi layer2-svc.yaml
    
    2. Add the following content
    kind: Service
    apiVersion: v1
    metadata:
     name: layer2-svc
     annotations:  # the following three annotations are required
       lb.kubesphere.io/v1alpha1: openelb
       protocol.openelb.kubesphere.io/v1alpha1: layer2
       eip.openelb.kubesphere.io/v1alpha2: eip-pool  # name of the EIP pool
    spec:
     selector:
       app: layer2-openelb
     type: LoadBalancer
     ports:
       - name: http
         port: 80
         targetPort: 8080
     externalTrafficPolicy: Cluster
    
    3. Apply the YAML file to create the Service
    kubectl apply -f layer2-svc.yaml
    

    3. Verify

    [root@k8s-master ~]# kubectl get svc
    NAME         TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)        AGE
    kubernetes   ClusterIP      10.233.0.1      <none>          443/TCP        34m
    layer2-svc   LoadBalancer   10.233.49.106   192.168.85.91   80:31929/TCP   4s
    
    [root@k8s-master ~]# curl 192.168.85.91
    You've hit layer2-openelb-7b4fdf6f85-nvsws
    
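    Because the Deployment runs two replicas and the kubia image echoes the pod name, repeating the request should show responses from both pods:

    for i in $(seq 4); do curl -s 192.168.85.91; done
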
  Original article: https://blog.csdn.net/u011143903/article/details/126040543