k8s Deployment Manual - v06


    I. Basic configuration

    1. Set hostnames (run the matching command on each host)

    hostnamectl set-hostname k8s-master01
    hostnamectl set-hostname k8s-master02
    hostnamectl set-hostname k8s-master03
    hostnamectl set-hostname k8s-node01
    hostnamectl set-hostname k8s-node02
    

    2. Add hostname/IP resolution

    cat > /etc/hosts <<EOF
    127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
    ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
    
    
    192.168.1.220 k8s-master01
    192.168.1.221 k8s-master02
    192.168.1.222 k8s-master03
    192.168.1.223 k8s-node01
    192.168.1.224 k8s-node02
    EOF
    

    3. Update the servers, sync time, disable the firewall, then reboot

    # Configure DNS resolvers for internet access
    cat > /etc/resolv.conf <<EOF
    nameserver 114.114.114.114
    nameserver 8.8.8.8
    EOF
    
    cat /etc/resolv.conf
    
    # If SSH logins are slow, optionally disable DNS lookups and GSSAPI authentication
    #sed -i "s|#UseDNS yes|UseDNS no|" /etc/ssh/sshd_config
    #sed -i "s|GSSAPIAuthentication yes|GSSAPIAuthentication no|" /etc/ssh/sshd_config
    
    
    # Switch to the Aliyun YUM mirror
    
    rm -rf /etc/yum.repos.d/bak && mkdir -p /etc/yum.repos.d/bak && mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak
    
    curl -o /etc/yum.repos.d/CentOS-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo
    
    
    yum clean all && yum makecache
    
    cd /etc/yum.repos.d
    
    
    # CentOS 7: make /etc/rc.d/rc.local executable so it runs at boot
    chmod +x /etc/rc.d/rc.local
    
    
    # Put vim in paste mode so pasted text is not auto-indented
    echo "set paste" >> /root/.vimrc
    
    
    # Install dependency packages
    
    yum -y install vim net-tools lrzsz unzip gcc telnet wget sshpass ntpdate ntp curl
    
    yum -y install conntrack ipvsadm ipset  iptables  sysstat libseccomp git  
    
    # Time sync via cron every 5 minutes
    echo '*/5 * * * * /usr/sbin/ntpdate ntp1.aliyun.com >/dev/null 2>&1'>/var/spool/cron/root && crontab -l
    
    
    
    # Use iptables as the firewall, with an empty ruleset
    systemctl  stop firewalld  &&  systemctl  disable firewalld
    
    yum -y install iptables-services  &&  systemctl  start iptables  &&  systemctl  enable iptables  &&  iptables -F  &&  service iptables save
    
    
    # Disable swap
    swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
    
    # Disable SELinux
    setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
    
    # Tune kernel parameters for Kubernetes (comments are kept on their own lines so sysctl can parse the file)
    cat > /etc/sysctl.d/kubernetes.conf <<EOF
    net.bridge.bridge-nf-call-iptables=1
    net.bridge.bridge-nf-call-ip6tables=1
    net.ipv4.ip_forward=1
    #net.ipv4.tcp_tw_recycle=0
    # Avoid using swap; it is only touched when the system is out of memory
    vm.swappiness=0
    # Do not check whether enough physical memory is available
    vm.overcommit_memory=1
    # Do not panic on OOM; let the OOM killer handle it
    vm.panic_on_oom=0
    fs.inotify.max_user_instances=8192
    fs.inotify.max_user_watches=1048576
    fs.file-max=52706963
    fs.nr_open=52706963
    net.ipv6.conf.all.disable_ipv6=1
    net.netfilter.nf_conntrack_max=2310720
    EOF
    
    modprobe ip_vs_rr && modprobe br_netfilter && sysctl -p /etc/sysctl.d/kubernetes.conf
    
    
    # Stop services that are not needed
    systemctl stop postfix && systemctl disable postfix
    
    
    
    # Directory for persistent k8s (journald) logs
    mkdir -p /var/log/journal
    mkdir -p /etc/systemd/journald.conf.d
    cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
    [Journal]
    # Persist logs to disk
    Storage=persistent
    
    # Compress rotated logs
    Compress=yes
    
    SyncIntervalSec=5m
    RateLimitInterval=30s
    RateLimitBurst=1000
    
    # Cap total disk usage at 10G
    SystemMaxUse=10G
    
    # Cap a single journal file at 200M
    SystemMaxFileSize=200M
    
    # Keep logs for 36 weeks
    MaxRetentionSec=36week
    
    # Do not forward to syslog
    ForwardToSyslog=no
    EOF
    
    systemctl restart systemd-journald
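    
    As a quick sanity check (a minimal sketch), confirm the journald and sysctl settings above actually took effect:
    
    journalctl --disk-usage
    sysctl net.bridge.bridge-nf-call-iptables vm.swappiness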
    

    4. Upgrade the kernel and reboot

    # Import the elrepo GPG key
    rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
    
    
    # Install the elrepo YUM repository
    yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
    
    # Install kernel-lt (long-term support branch); kernel-ml would be the mainline/latest branch
    yum --enablerepo="elrepo-kernel" -y install kernel-lt.x86_64
    
    
    # Set the default GRUB2 boot entry to 0 (the newly installed kernel)
    grub2-set-default 0
    
    
    # Regenerate the GRUB2 config
    grub2-mkconfig -o /boot/grub2/grub.cfg
    
    
    # Reboot so the upgraded kernel takes effect
    reboot
    
    
    # After the reboot, verify the running kernel version
    uname -r
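    
    Before rebooting you can also confirm which kernels are installed and what GRUB will boot by default (a sketch; entry order depends on what is installed):
    
    rpm -qa | grep ^kernel
    awk -F\' '$1=="menuentry " {print $2}' /boot/grub2/grub.cfg
    grub2-editenv list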
    
    
    

    II. Deploy k8s v1.19 with sealos

    1. Install sealos 3.3

    
    # Configure DNS resolvers
    cat > /etc/resolv.conf <<EOF
    nameserver 8.8.8.8
    nameserver 114.114.114.114
    nameserver 223.5.5.5
    EOF
    
    cat /etc/resolv.conf
    
    # Time sync
    ntpdate ntp1.aliyun.com
    
     
    wget -c https://github.com/fanux/sealos/releases/download/v3.3.8/sealos
    
    #tar zxvf sealos*.tar.gz sealos
    chmod +x sealos && mv sealos /usr/bin
    
    sealos version
    
    # Time sync
    ntpdate ntp1.aliyun.com
    

    2. Offline install of k8s 1.19

    Download link: https://pan.baidu.com/s/1F9sZoHBX1K1ihBP9rZSHBQ?pwd=jood
    Extraction code: jood
    
    
    
    # Time sync
    ntpdate ntp1.aliyun.com
    
    # Install (the cluster used throughout this manual: 192.168.1.220-224)
    sealos init --passwd 1qaz@WSX \
    	--master 192.168.1.220 \
    	--master 192.168.1.221 \
    	--master 192.168.1.222 \
    	--node 192.168.1.223 \
    	--node 192.168.1.224 \
    	--pkg-url /root/kube1.19.16.tar.gz \
    	--version v1.19.16
    
    
    
    # Install (alternative example on a different IP range)
    sealos init --passwd 1qaz@WSX \
    	--master 192.168.1.115 \
    	--master 192.168.1.116 \
    	--master 192.168.1.117 \
    	--node 192.168.1.118 \
    	--node 192.168.1.119 \
    	--pkg-url /root/kube1.19.16.tar.gz \
    	--version v1.19.16
    
    

    3. Verify the cluster

    kubectl get nodes
     
    kubectl get pod -A
     
     
     
    # Configure kubectl bash completion
    yum install -y bash-completion
    source /usr/share/bash-completion/bash_completion
    source <(kubectl completion bash)
    echo "source <(kubectl completion bash)" >> /etc/profile
    
    
    # Show taints
    kubectl describe node |grep -i taints
    
    # Remove the master taint (uncomment to allow scheduling on the masters)
    #kubectl taint node k8s-master02 node-role.kubernetes.io/master:NoSchedule-
    #kubectl taint node k8s-master03 node-role.kubernetes.io/master:NoSchedule-
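    
    As an extra smoke test (a sketch, not part of the original flow), in-cluster DNS can be verified with a throwaway busybox pod:
    
    kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default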
    

    4. Common sealos 3.3 commands

    # Add node(s):
    sealos join --node 192.168.1.63,192.168.1.64
    
    # Add master(s):
    sealos join --master 192.168.1.61,192.168.1.62
    
    # Remove node(s):
    sealos clean --node 192.168.1.63,192.168.1.64
    
    # Remove master(s):
    sealos clean --master 192.168.1.61,192.168.1.62
    
    # Reset the entire cluster
    sealos clean --all -f
    

    5. Install metrics-server (kubectl top)

    cat > /root/top.yaml <<EOF
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      labels:
        k8s-app: metrics-server
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      labels:
        k8s-app: metrics-server
        rbac.authorization.k8s.io/aggregate-to-admin: "true"
        rbac.authorization.k8s.io/aggregate-to-edit: "true"
        rbac.authorization.k8s.io/aggregate-to-view: "true"
      name: system:aggregated-metrics-reader
    rules:
    - apiGroups:
      - metrics.k8s.io
      resources:
      - pods
      - nodes
      verbs:
      - get
      - list
      - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      labels:
        k8s-app: metrics-server
      name: system:metrics-server
    rules:
    - apiGroups:
      - ""
      resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
      - configmaps
      verbs:
      - get
      - list
      - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      labels:
        k8s-app: metrics-server
      name: metrics-server-auth-reader
      namespace: kube-system
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: extension-apiserver-authentication-reader
    subjects:
    - kind: ServiceAccount
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      labels:
        k8s-app: metrics-server
      name: metrics-server:system:auth-delegator
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:auth-delegator
    subjects:
    - kind: ServiceAccount
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      labels:
        k8s-app: metrics-server
      name: system:metrics-server
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:metrics-server
    subjects:
    - kind: ServiceAccount
      name: metrics-server
      namespace: kube-system
    ---
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        k8s-app: metrics-server
      name: metrics-server
      namespace: kube-system
    spec:
      ports:
      - name: https
        port: 443
        protocol: TCP
        targetPort: https
      selector:
        k8s-app: metrics-server
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      labels:
        k8s-app: metrics-server
      name: metrics-server
      namespace: kube-system
    spec:
      selector:
        matchLabels:
          k8s-app: metrics-server
      strategy:
        rollingUpdate:
          maxUnavailable: 0
      template:
        metadata:
          labels:
            k8s-app: metrics-server
        spec:
          containers:
          - args:
            - --cert-dir=/tmp
            - --kubelet-insecure-tls
            - --secure-port=4443
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
            - --kubelet-use-node-status-port
            # Optionally push metrics-server to your own registry (e.g. on Aliyun) and replace the image below with your own address
            image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/metrics-server:v0.4.3
            imagePullPolicy: IfNotPresent
            livenessProbe:
              failureThreshold: 3
              httpGet:
                path: /livez
                port: https
                scheme: HTTPS
              periodSeconds: 10
            name: metrics-server
            ports:
            - containerPort: 4443
              name: https
              protocol: TCP
            readinessProbe:
              failureThreshold: 3
              httpGet:
                path: /readyz
                port: https
                scheme: HTTPS
              periodSeconds: 10
            securityContext:
              readOnlyRootFilesystem: true
              runAsNonRoot: true
              runAsUser: 1000
            volumeMounts:
            - mountPath: /tmp
              name: tmp-dir
          nodeSelector:
            kubernetes.io/os: linux
          priorityClassName: system-cluster-critical
          serviceAccountName: metrics-server
          volumes:
          - emptyDir: {}
            name: tmp-dir
    ---
    apiVersion: apiregistration.k8s.io/v1
    kind: APIService
    metadata:
      labels:
        k8s-app: metrics-server
      name: v1beta1.metrics.k8s.io
    spec:
      group: metrics.k8s.io
      groupPriorityMinimum: 100
      insecureSkipTLSVerify: true
      service:
        name: metrics-server
        namespace: kube-system
      version: v1beta1
      versionPriority: 100
    EOF
    
    
    
    kubectl apply -f /root/top.yaml
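    
    Once the metrics-server pod is Running (it can take a minute or two before metrics are served), verify it:
    
    kubectl -n kube-system get pod -l k8s-app=metrics-server
    kubectl top nodes
    kubectl top pod -A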
    
    
    

    III. Deploy NFS

    1. Server side

    # Configure DNS resolvers
    cat > /etc/resolv.conf <<EOF
    nameserver 114.114.114.114
    nameserver 8.8.8.8
    EOF
    
    # Installed on 192.168.1.225 here (in production, plan a dedicated NFS server)
    yum -y install nfs-utils
     
    # Create the NFS export directory
    mkdir /nfs_dir
    chown nobody.nobody /nfs_dir
     
    # Configure the NFS server export
    echo '/nfs_dir *(rw,sync,no_root_squash)' > /etc/exports
     
    # Restart the services
    systemctl restart rpcbind.service
    systemctl restart nfs-utils.service 
    systemctl restart nfs-server.service 
     
    # Enable the services at boot
    systemctl enable  rpcbind.service
    systemctl enable  nfs-utils.service 
    systemctl enable  nfs-server.service 
     
    # Verify the NFS export is reachable
    #showmount -e 192.168.1.225
    

    2. Client side

    # Run on every server that needs the mount
    mkdir /nfs_dir
    yum install nfs-utils -y
    
    # Mount the share
    mount 192.168.1.225:/nfs_dir /nfs_dir
    
    # Mount at boot
    echo "mount 192.168.1.225:/nfs_dir /nfs_dir" >> /etc/rc.local
    
    
    cat /etc/rc.local
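    
    A quick check that the share is mounted and writable (a sketch; the test file name is arbitrary):
    
    df -h /nfs_dir
    touch /nfs_dir/test-$(hostname) && ls -l /nfs_dir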
    
    
    

    IV. Deploy a StorageClass

    1. Create nfs-sc.yaml

    cat > /root/nfs-sc.yaml <<EOF
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: nfs-client-provisioner
      namespace: kube-system
     
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: nfs-client-provisioner-runner
    rules:
      - apiGroups: [""]
        resources: ["persistentvolumes"]
        verbs: ["get", "list", "watch", "create", "delete"]
      - apiGroups: [""]
        resources: ["persistentvolumeclaims"]
        verbs: ["get", "list", "watch", "update"]
      - apiGroups: ["storage.k8s.io"]
        resources: ["storageclasses"]
        verbs: ["get", "list", "watch"]
      - apiGroups: [""]
        resources: ["events"]
        verbs: ["list", "watch", "create", "update", "patch"]
      - apiGroups: [""]
        resources: ["endpoints"]
        verbs: ["get", "list", "watch", "create", "update", "patch"]
     
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: run-nfs-client-provisioner
    subjects:
      - kind: ServiceAccount
        name: nfs-client-provisioner
        namespace: kube-system 
    roleRef:
      kind: ClusterRole
      name: nfs-client-provisioner-runner
      apiGroup: rbac.authorization.k8s.io
     
    ---
    kind: Deployment
    apiVersion: apps/v1
    metadata:
      name: nfs-provisioner-01
      namespace: kube-system
    spec:
      replicas: 1
      strategy:
        type: Recreate
      selector:
        matchLabels:
          app: nfs-provisioner-01
      template:
        metadata:
          labels:
            app: nfs-provisioner-01
        spec:
          serviceAccountName: nfs-client-provisioner
          containers:
            - name: nfs-client-provisioner
    # Older versions of the provisioner used jmgao1983/nfs-client-provisioner:latest
    #          image: jmgao1983/nfs-client-provisioner:latest
              image: vbouchaud/nfs-client-provisioner:latest
              imagePullPolicy: IfNotPresent
              volumeMounts:
                - name: nfs-client-root
                  mountPath: /persistentvolumes
              env:
                - name: PROVISIONER_NAME
                  value: nfs-provisioner-01  # provisioner name referenced by the StorageClass below
                - name: NFS_SERVER
                  value: 192.168.1.225   # NFS server address
                - name: NFS_PATH
                  value: /nfs_dir   # NFS export path
          volumes:
            - name: nfs-client-root
              nfs:
                server: 192.168.1.225   # NFS server address
                path: /nfs_dir   # NFS export path
     
    ---
    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: nfs-boge
    provisioner: nfs-provisioner-01
    # Supported policies: Delete, Retain; the default is Delete
    reclaimPolicy: Retain
    EOF
    
    
    
    
    # Create
    kubectl apply -f /root/nfs-sc.yaml
    
    # Check
    kubectl -n kube-system get pod
    
    
    kubectl get sc
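    
    To confirm dynamic provisioning works end to end, a throwaway PVC against the nfs-boge StorageClass can be created and checked (a minimal sketch; the PVC name is arbitrary):
    
    cat > /root/test-pvc.yaml <<EOF
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: test-nfs-pvc
    spec:
      storageClassName: nfs-boge
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 1Gi
    EOF
    
    kubectl apply -f /root/test-pvc.yaml
    
    # STATUS should become Bound and a matching PV should appear
    kubectl get pvc test-nfs-pvc
    kubectl get pv
    
    # Clean up
    kubectl delete -f /root/test-pvc.yaml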
    

    V. Set up the Harbor registry

    1. Install

     
    # Upload docker-compose and harbor-offline-installer-v2.4.1.tgz to /root
    
    
    mv /root/docker-compose /usr/local/bin/
    chmod a+x /usr/local/bin/docker-compose
     
    ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
     
    tar -zxvf harbor-offline-installer-v2.4.1.tgz
     
    mv harbor /usr/local/
     
    cd /usr/local/harbor/
     
    cp harbor.yml.tmpl harbor.yml
    
    # Adjust the configuration: set the hostname and disable HTTPS
    sed -i 's/hostname: reg.mydomain.com/hostname: 192.168.1.225/g' harbor.yml
    sed -i 's/https/#https/g' harbor.yml
    sed -i 's/port: 443/#port: 443/g' harbor.yml
    sed -i 's/certificate/#certificate/g' harbor.yml
    sed -i 's/private_key/#private_key/g' harbor.yml
    
    # Data directory
    mkdir /data
    
    cat > /etc/docker/daemon.json <<EOF
    {
        "registry-mirrors": ["https://gv33cz42.mirror.aliyuncs.com", "https://registry.cn-hangzhou.aliyuncs.com", "https://registry.docker-cn.com", "https://docker.mirrors.ustc.edu.cn", "https://dockerhub.azk8s.cn"],
      "insecure-registries":
            ["192.168.1.225:80"],
        "exec-opts": ["native.cgroupdriver=systemd"],
        "max-concurrent-downloads": 20,
        "live-restore": true,
        "storage-driver": "overlay2",
        "max-concurrent-uploads": 10,
        "debug": true,
        "log-opts": {
        "max-size": "100m",
        "max-file": "10"
        }
    }
    EOF
    
    
     
    systemctl daemon-reload && systemctl restart docker && systemctl status docker
    
     
     
    # Install
    ./install.sh
     
     
    ## Restart Harbor
    cd /usr/local/harbor/
    docker-compose down -v
    docker-compose up -d
    docker ps|grep harbor
    netstat -ntlp
    
    
    Web UI:
    http://192.168.1.225/
    
    

    2. On every other node that needs to pull from the registry, add the following to daemon.json

    
    cat > /etc/docker/daemon.json <<EOF
    {
      "registry-mirrors": [
         "https://bxsfpjcb.mirror.aliyuncs.com"
      ],
      "max-concurrent-downloads": 10,
      "log-driver": "json-file",
      "log-level": "warn",
      "log-opts": {
        "max-size": "10m",
        "max-file": "3"
        },
      "insecure-registries":
            ["127.0.0.1","192.168.1.225:80"],
      "data-root":"/var/lib/docker"
    }
    EOF
    
    
    
    # Restart docker
    systemctl daemon-reload && systemctl restart docker && systemctl status docker
    
    
    

    3. Using the registry from a node

     
    # Log in to the registry
    docker login -u admin -p Harbor12345 192.168.1.225:80
    
    # Pull an image
    docker pull daocloud.io/library/nginx:1.9.1
    
    # Tag the image for the registry
    docker tag daocloud.io/library/nginx:1.9.1 192.168.1.225:80/library/nginx:1.9.1
    
    # Push the image
    docker push 192.168.1.225:80/library/nginx:1.9.1
    
    # Remove the local tag
    docker rmi 192.168.1.225:80/library/nginx:1.9.1
    
    # Save an image to a local tar file
    docker save k8s.gcr.io/coredns:1.7.0  > /root/coredns-v1.7.0.tar
    
    # Load the tar file back
    docker load -i  /root/coredns-v1.7.0.tar
    

    4. Batch tag and push images to Harbor

    
    cd /root
    # List local image names
    docker images | awk 'NR!=1{print $1":"$2}' > 01-image-old.txt && cat 01-image-old.txt
    
    # Replace / with - in the image names
    rm -rf  02-image-sed.txt && cp 01-image-old.txt 02-image-sed.txt && sed -i  "s|/|-|g" 02-image-sed.txt  && cat /root/02-image-sed.txt
    
    
    # Tag the images for the Harbor registry
    
    vim /root/03-tar-image.sh 
    #####################################################
    #!/bin/sh
    old=/root/01-image-old.txt
    new=/root/02-image-sed.txt
    l=$(cat /root/01-image-old.txt| wc -l)
    for ((i=1 ; i<=$l ; i++))
    do
    a=$(sed -n "$i"p $old)
    b=$(sed -n "$i"p $new)
    #echo "update xxxx  set uid='$a' where uid='$b';"
    docker tag $a 192.168.1.225:80/library/$b
    done
    #####################################################
    
    
    
    
    # Run the tagging script
    bash /root/03-tar-image.sh
    
    docker images |grep library
    
    
    
    # List the images tagged for the Harbor registry
    docker images |grep 192.168.1.225 | awk '{print $1":"$2}'  > 04-tar-image.txt && cat 04-tar-image.txt
    
    # Push to Harbor
    for h in `cat 04-tar-image.txt`; do docker push $h; done
    
    
    
    
    # Remove the locally tagged copies
    for d in `cat 04-tar-image.txt`; do docker rmi $d; done
    docker images |grep library
    
    # Clean up the generated files
    rm -rf /root/0*txt /root/03-tar-image.sh
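    
    As a sanity check (a sketch, reusing the nginx image pushed in step 3), pull one image back from Harbor to confirm the pushes landed:
    
    docker pull 192.168.1.225:80/library/nginx:1.9.1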
    

    VI. Kuboard web UI

    1. Download

    curl -o kuboard-v3.yaml https://addons.kuboard.cn/kuboard/kuboard-v3-storage-class.yaml

    2. Edit the YAML

    # Edit kuboard-v3.yaml: there are 3 settings in the file that must be changed, all of them storageClassName, which must point to an existing StorageClass (nfs-boge here)
    
    
      volumeClaimTemplates:
      - metadata:
          name: data
        spec:
          # set a valid StorageClass name here
          storageClassName: nfs-boge
          accessModes: [ "ReadWriteMany" ]
          resources:
            requests:
              storage: 5Gi
    
    
    ---
    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: kuboard-data-pvc
      namespace: kuboard
    spec:
      storageClassName: nfs-boge
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi
    
    

    3. Apply

    kubectl create -f kuboard-v3.yaml
    
    kubectl get pod -n kuboard
    
    ############################################
    
    # Access
    http://192.168.1.220:30080/
    Log in with the initial credentials:
        Username: admin
        Password: Kuboard123
    #############################################	
    
    # Troubleshooting: check kubelet logs
    journalctl -f -u kubelet.service
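    
    Before opening the UI, it is worth confirming the kuboard pods are Running and the NodePort service (30080, as used in the URL above) is exposed:
    
    kubectl -n kuboard get pods -o wide
    kubectl -n kuboard get svc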
    

    VII. Install Helm 3

    1. Download the Helm package

     wget https://get.helm.sh/helm-v3.6.1-linux-amd64.tar.gz
    

    2. Install Helm

    # Unpack and move the binary to /usr/bin:
    
    tar -xvf helm-v3.6.1-linux-amd64.tar.gz && cd linux-amd64/ && mv helm /usr/bin 
    
    
    # Check the version
    helm version
    

    3. Configure repositories

    # Add public repositories
    helm repo add incubator https://charts.helm.sh/incubator
    helm repo add bitnami https://charts.bitnami.com/bitnami
    # Microsoft Azure mirror
    helm repo add stable http://mirror.azure.cn/kubernetes/charts
    # Aliyun mirror
    helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
    
    # Alternative sources (note: "stable" is already added above, so re-adding it needs --force-update; the old Google charts repo has been decommissioned)
    #helm repo add stable   https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
    #helm repo add google  https://kubernetes-charts.storage.googleapis.com
    helm repo add jetstack https://charts.jetstack.io
    
    # List repositories
    helm repo list
    # Update repositories
    helm repo update  
    
    # Remove a repository
    #helm repo remove  aliyun
    
    
    # helm list
    
    
    # Fix the helm warning about an insecure (group/world readable) kubeconfig
    chmod g-rw ~/.kube/config
    chmod o-r ~/.kube/config
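    
    A quick end-to-end check of the repositories (a sketch; bitnami/nginx is just an example chart and the release name is arbitrary):
    
    helm search repo nginx
    helm install test-nginx bitnami/nginx --dry-run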
    

    VIII. haproxy + keepalived + ingress

    1. Deploy the Aliyun ingress-nginx controller

    mkdir -p /data/k8s/
    
    cd /data/k8s/
     
    cat > /data/k8s/aliyun-ingress-nginx.yaml <<EOF
    apiVersion: v1
    kind: Namespace
    metadata:
      name: ingress-nginx
      labels:
        app: ingress-nginx
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: nginx-ingress-controller
      namespace: ingress-nginx
      labels:
        app: ingress-nginx
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRole
    metadata:
      name: nginx-ingress-controller
      labels:
        app: ingress-nginx
    rules:
      - apiGroups:
          - ""
        resources:
          - configmaps
          - endpoints
          - nodes
          - pods
          - secrets
          - namespaces
          - services
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - "extensions"
          - "networking.k8s.io"
        resources:
          - ingresses
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - ""
        resources:
          - events
        verbs:
          - create
          - patch
      - apiGroups:
          - "extensions"
          - "networking.k8s.io"
        resources:
          - ingresses/status
        verbs:
          - update
      - apiGroups:
          - ""
        resources:
          - configmaps
        verbs:
          - create
      - apiGroups:
          - ""
        resources:
          - configmaps
        resourceNames:
          - "ingress-controller-leader-nginx"
        verbs:
          - get
          - update
    ---
    apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRoleBinding
    metadata:
      name: nginx-ingress-controller
      labels:
        app: ingress-nginx
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: nginx-ingress-controller
    subjects:
      - kind: ServiceAccount
        name: nginx-ingress-controller
        namespace: ingress-nginx
    ---
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: ingress-nginx
      name: nginx-ingress-lb
      namespace: ingress-nginx
    spec:
      # DaemonSet need:
      # ----------------
      type: ClusterIP
      # ----------------
      # Deployment need:
      # ----------------
    #  type: NodePort
      # ----------------
      ports:
      - name: http
        port: 80
        targetPort: 80
        protocol: TCP
      - name: https
        port: 443
        targetPort: 443
        protocol: TCP
      - name: metrics
        port: 10254
        protocol: TCP
        targetPort: 10254
      selector:
        app: ingress-nginx
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: nginx-configuration
      namespace: ingress-nginx
      labels:
        app: ingress-nginx
    data:
      keep-alive: "75"
      keep-alive-requests: "100"
      upstream-keepalive-connections: "10000"
      upstream-keepalive-requests: "100"
      upstream-keepalive-timeout: "60"
      allow-backend-server-header: "true"
      enable-underscores-in-headers: "true"
      generate-request-id: "true"
      http-redirect-code: "301"
      ignore-invalid-headers: "true"
      log-format-upstream: '{"@timestamp": "$time_iso8601","remote_addr": "$remote_addr","x-forward-for": "$proxy_add_x_forwarded_for","request_id": "$req_id","remote_user": "$remote_user","bytes_sent": $bytes_sent,"request_time": $request_time,"status": $status,"vhost": "$host","request_proto": "$server_protocol","path": "$uri","request_query": "$args","request_length": $request_length,"duration": $request_time,"method": "$request_method","http_referrer": "$http_referer","http_user_agent":  "$http_user_agent","upstream-sever":"$proxy_upstream_name","proxy_alternative_upstream_name":"$proxy_alternative_upstream_name","upstream_addr":"$upstream_addr","upstream_response_length":$upstream_response_length,"upstream_response_time":$upstream_response_time,"upstream_status":$upstream_status}'
      max-worker-connections: "65536"
      worker-processes: "2"
      proxy-body-size: 20m
      proxy-connect-timeout: "10"
      proxy_next_upstream: error timeout http_502
      reuse-port: "true"
      server-tokens: "false"
      ssl-ciphers: ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA
      ssl-protocols: TLSv1 TLSv1.1 TLSv1.2
      ssl-redirect: "false"
      worker-cpu-affinity: auto
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: tcp-services
      namespace: ingress-nginx
      labels:
        app: ingress-nginx
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: udp-services
      namespace: ingress-nginx
      labels:
        app: ingress-nginx
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: nginx-ingress-controller
      namespace: ingress-nginx
      labels:
        app: ingress-nginx
      annotations:
        component.version: "v0.30.0"
        component.revision: "v1"
    spec:
      # Deployment need:
      # ----------------
    #  replicas: 1
      # ----------------
      selector:
        matchLabels:
          app: ingress-nginx
      template:
        metadata:
          labels:
            app: ingress-nginx
          annotations:
            prometheus.io/port: "10254"
            prometheus.io/scrape: "true"
            scheduler.alpha.kubernetes.io/critical-pod: ""
        spec:
          # DaemonSet need:
          # ----------------
          hostNetwork: true
          # ----------------
          serviceAccountName: nginx-ingress-controller
          priorityClassName: system-node-critical
          affinity:
            podAntiAffinity:
              preferredDuringSchedulingIgnoredDuringExecution:
              - podAffinityTerm:
                  labelSelector:
                    matchExpressions:
                    - key: app
                      operator: In
                      values:
                      - ingress-nginx
                  topologyKey: kubernetes.io/hostname
                weight: 100
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                - matchExpressions:
                  - key: type
                    operator: NotIn
                    values:
                    - virtual-kubelet
          containers:
            - name: nginx-ingress-controller
              image: registry.cn-beijing.aliyuncs.com/acs/aliyun-ingress-controller:v0.30.0.2-9597b3685-aliyun
              imagePullPolicy: IfNotPresent
              args:
                - /nginx-ingress-controller
                - --configmap=$(POD_NAMESPACE)/nginx-configuration
                - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
                - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
                - --publish-service=$(POD_NAMESPACE)/nginx-ingress-lb
                - --annotations-prefix=nginx.ingress.kubernetes.io
                - --enable-dynamic-certificates=true
                - --v=2
              securityContext:
                allowPrivilegeEscalation: true
                capabilities:
                  drop:
                    - ALL
                  add:
                    - NET_BIND_SERVICE
                runAsUser: 101
              env:
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.name
                - name: POD_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
              ports:
                - name: http
                  containerPort: 80
                - name: https
                  containerPort: 443
              livenessProbe:
                failureThreshold: 3
                httpGet:
                  path: /healthz
                  port: 10254
                  scheme: HTTP
                initialDelaySeconds: 10
                periodSeconds: 10
                successThreshold: 1
                timeoutSeconds: 10
              readinessProbe:
                failureThreshold: 3
                httpGet:
                  path: /healthz
                  port: 10254
                  scheme: HTTP
                periodSeconds: 10
                successThreshold: 1
                timeoutSeconds: 10
    #          resources:
    #            limits:
    #              cpu: "1"
    #              memory: 2Gi
    #            requests:
    #              cpu: "1"
    #              memory: 2Gi
              volumeMounts:
              - mountPath: /etc/localtime
                name: localtime
                readOnly: true
          volumes:
          - name: localtime
            hostPath:
              path: /etc/localtime
              type: File
          nodeSelector:
            boge/ingress-controller-ready: "true"
          tolerations:
          - operator: Exists
          initContainers:
          - command:
            - /bin/sh
            - -c
            - |
              mount -o remount rw /proc/sys
              sysctl -w net.core.somaxconn=65535
              sysctl -w net.ipv4.ip_local_port_range="1024 65535"
              sysctl -w fs.file-max=1048576
              sysctl -w fs.inotify.max_user_instances=16384
              sysctl -w fs.inotify.max_user_watches=524288
              sysctl -w fs.inotify.max_queued_events=16384
            image: registry.cn-beijing.aliyuncs.com/acs/busybox:v1.29.2
            imagePullPolicy: IfNotPresent
            name: init-sysctl
            securityContext:
              privileged: true
              procMount: Default
    ---
    ## Deployment need for aliyun'k8s:
    #apiVersion: v1
    #kind: Service
    #metadata:
    #  annotations:
    #    service.beta.kubernetes.io/alibaba-cloud-loadbalancer-id: "lb-xxxxxxxxxxxxxxxxxxx"
    #    service.beta.kubernetes.io/alibaba-cloud-loadbalancer-force-override-listeners: "true"
    #  labels:
    #    app: nginx-ingress-lb
    #  name: nginx-ingress-lb-local
    #  namespace: ingress-nginx
    #spec:
    #  externalTrafficPolicy: Local
    #  ports:
    #  - name: http
    #    port: 80
    #    protocol: TCP
    #    targetPort: 80
    #  - name: https
    #    port: 443
    #    protocol: TCP
    #    targetPort: 443
    #  selector:
    #    app: ingress-nginx
    #  type: LoadBalancer
    EOF
    
     
    
    kubectl  apply -f /data/k8s/aliyun-ingress-nginx.yaml
    

    2. Label node01/node02

    # Label the nodes that should run the ingress controller
    kubectl label node k8s-node01  boge/ingress-controller-ready=true
    kubectl label node k8s-node02  boge/ingress-controller-ready=true
    
    
    # Remove the labels
    #kubectl label node k8s-node01 boge/ingress-controller-ready-
    #kubectl label node k8s-node02 boge/ingress-controller-ready-
    
    # Show labels
    kubectl get nodes --show-labels
    
    
    kubectl -n ingress-nginx get po -owide
    
    
    
    

    3. Deploy haproxy + keepalived

    3.0 Install on k8s-master01/02/03
    yum install haproxy keepalived  -y
    
    # Restart the services
    systemctl restart haproxy.service && systemctl status haproxy.service
    systemctl restart keepalived.service && systemctl status keepalived.service
    
    
    # Check status
    systemctl status haproxy.service 
    systemctl status keepalived.service
    
    # Enable at boot
    systemctl  enable keepalived.service
    systemctl  enable haproxy.service
    
    
    
    3.1 Append the haproxy configuration on k8s-master01/02/03
    cat >> /etc/haproxy/haproxy.cfg <<EOF
    ###################################################
    listen ingress-http
            bind 0.0.0.0:80
            mode tcp
            option tcplog
            option dontlognull
            option dontlog-normal
            balance roundrobin
            server 192.168.1.223 192.168.1.223:80 check inter 2000 fall 2 rise 2 weight 1
            server 192.168.1.224 192.168.1.224:80 check inter 2000 fall 2 rise 2 weight 1
    
    listen ingress-https
            bind 0.0.0.0:443
            mode tcp
            option tcplog
            option dontlognull
            option dontlog-normal
            balance roundrobin
            server 192.168.1.223 192.168.1.223:443 check inter 2000 fall 2 rise 2 weight 1
            server 192.168.1.224 192.168.1.224:443 check inter 2000 fall 2 rise 2 weight 1
    ###################################################
    EOF
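    
    Before restarting, the appended configuration can be validated with haproxy's config-check flag:
    
    haproxy -c -f /etc/haproxy/haproxy.cfg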
    
    3.2 Configure keepalived on k8s-master01/02/03
    cat > /etc/keepalived/keepalived.conf <<EOF
    global_defs {
        router_id lb-master
    }
    vrrp_script check-haproxy {
        script "killall -0 haproxy"
        interval 5
        weight -60
    }
    vrrp_instance VI-kube-master {
        state MASTER
        priority 120
        dont_track_primary
        interface ens160          # change this to the real internal NIC name of your machine
        virtual_router_id 111
        advert_int 3              # VRRP advertisement (heartbeat) interval in seconds
        track_script {
            check-haproxy
        }
        virtual_ipaddress {
            192.168.1.100         # VIP address
        }
    }
    EOF
    
    3.3 Restart the services on k8s-master01/02/03
    
    # Restart the services
    systemctl restart haproxy.service
    systemctl restart keepalived.service
    
    
    # Check status
    systemctl status haproxy.service 
    systemctl status keepalived.service
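    
    After the restart, exactly one master should hold the VIP, and the VIP should answer on the haproxy frontends (ens160 and 192.168.1.100 come from the keepalived config above):
    
    ip addr show ens160 | grep 192.168.1.100
    curl -I http://192.168.1.100/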
    
    
    

    4. Deploy a test nginx ingress

    cat > /root/nginx-ingress.yaml <<EOF
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      ports:
        - port: 80
          protocol: TCP
          targetPort: 80
      selector:
        app: nginx
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: nginx
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
            - name: nginx
              image: nginx
              imagePullPolicy: IfNotPresent # pull only if the image is not present locally
              ports:
                - containerPort: 80
    ---
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: nginx-ingress
    spec:
      rules:
        - host: nginx.boge.com
          http:
            paths:
              - backend:
                  serviceName: nginx
                  servicePort: 80
                path: /
    EOF
    

    5. Test the nginx ingress

    
    
    kubectl apply -f /root/nginx-ingress.yaml
    # Check the created ingress resource
    kubectl get ingress -A
    
    
    
    
    
    # Add a hosts entry on this server
    echo "192.168.1.100 nginx.boge.com" >> /etc/hosts
    
    
    
    
    # On other machines, add the same hosts entry to test
    192.168.1.100 nginx.boge.com
    
    
    
    # Test
    curl nginx.boge.com
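    
    If you would rather not edit /etc/hosts on a client, the same test can be run by pointing curl at the VIP with an explicit Host header (the VIP and hostname come from the steps above):
    
    curl -H "Host: nginx.boge.com" http://192.168.1.100/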
    

    IX. ELK log collection

    1. Create a test tomcat

    cat > 01-tomcat-test.yaml <<EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      labels:
        app: tomcat
      name: tomcat
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: tomcat
      template:
        metadata:
          labels:
            app: tomcat
        spec:
          tolerations:
          - key: "node-role.kubernetes.io/master"
            effect: "NoSchedule"
          containers:
          - name: tomcat
            image: "tomcat:7.0"
            env:      # Note 1: these env vars tell log-pilot which logs to collect (1. stdout, 2. /usr/local/tomcat/logs/catalina.*.log)
            - name: aliyun_logs_tomcat-syslog   # sent to ES under the index name tomcat-syslog
              value: "stdout"
            - name: aliyun_logs_tomcat-access   # sent to ES under the index name tomcat-access
              value: "/usr/local/tomcat/logs/catalina.*.log"
            volumeMounts:   # Note 2: the in-pod log directory must be shared through a volume; multiple directories can be collected
              - name: tomcat-log
                mountPath: /usr/local/tomcat/logs
          volumes:
            - name: tomcat-log
              emptyDir: {}
    EOF
    
    
    
    kubectl apply -f 01-tomcat-test.yaml
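    
    Check that the test pod comes up and is producing the stdout output that log-pilot will pick up later:
    
    kubectl get pods -l app=tomcat
    kubectl logs deployment/tomcat --tail=20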
    

    2. Deploy Elasticsearch

    kubectl create ns logging
    
    
    
    cat > 02-elasticsearch.6.8.13-statefulset.yaml << EOF
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      labels:
        addonmanager.kubernetes.io/mode: Reconcile
        k8s-app: elasticsearch-logging
        version: v6.8.13
      name: elasticsearch-logging
      namespace: logging
    spec:
      replicas: 1
      revisionHistoryLimit: 10
      selector:
        matchLabels:
          k8s-app: elasticsearch-logging
          version: v6.8.13
      serviceName: elasticsearch-logging
      template:
        metadata:
          labels:
            k8s-app: elasticsearch-logging
            version: v6.8.13
        spec:
    #      nodeSelector:
    #        esnode: "true"  ## label the node you want Elasticsearch to run on accordingly
          containers:
          - env:
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: cluster.name
              value: elasticsearch-logging-0
            - name: ES_JAVA_OPTS
              value: "-Xms512m -Xmx512m"
            image: elastic/elasticsearch:6.8.13
            name: elasticsearch-logging
            ports:
            - containerPort: 9200
              name: db
              protocol: TCP
            - containerPort: 9300
              name: transport
              protocol: TCP
            volumeMounts:
            - mountPath: /usr/share/elasticsearch/data
              name: elasticsearch-logging
          dnsConfig:
            options:
            - name: single-request-reopen
          initContainers:
          - command:
            - /bin/sysctl
            - -w
            - vm.max_map_count=262144
            image: busybox
            imagePullPolicy: IfNotPresent
            name: elasticsearch-logging-init
            resources: {}
            securityContext:
              privileged: true
          - name: fix-permissions
            image: busybox
            command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
            securityContext:
              privileged: true
            volumeMounts:
            - name: elasticsearch-logging
              mountPath: /usr/share/elasticsearch/data
          volumes:
          - name: elasticsearch-logging
            hostPath:
              path: /esdata
    ---
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        k8s-app: elasticsearch-logging
      name: elasticsearch
      namespace: logging
    spec:
      ports:
      - port: 9200
        protocol: TCP
        targetPort: db
      selector:
        k8s-app: elasticsearch-logging
      type: ClusterIP
    EOF
    
    
      
      
    kubectl apply -f 02-elasticsearch.6.8.13-statefulset.yaml
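    
    Once the elasticsearch-logging-0 pod is Running, cluster health can be checked through a temporary port-forward (a sketch; stop the background port-forward afterwards):
    
    kubectl -n logging get pods
    
    kubectl -n logging port-forward svc/elasticsearch 9200:9200 &
    sleep 3
    curl -s http://127.0.0.1:9200/_cluster/health?pretty
    kill %1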
    

    3. Deploy Kibana

    cat > 03-kibana.6.8.13.yaml <<EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: kibana
      namespace: logging
      labels:
        app: kibana
    spec:
      selector:
        matchLabels:
          app: kibana
      template:
        metadata:
          labels:
            app: kibana
        spec:
          containers:
          - name: kibana
            image: elastic/kibana:6.8.13
            resources:
              limits:
                cpu: 1000m
              requests:
                cpu: 100m
            env:
              - name: ELASTICSEARCH_URL
                value: http://elasticsearch:9200
            ports:
            - containerPort: 5601
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: kibana
      namespace: logging
      labels:
        app: kibana
    spec:
      ports:
      - port: 5601
        protocol: TCP
        targetPort: 5601
      type: ClusterIP
      selector:
        app: kibana
    ---
    apiVersion: extensions/v1beta1
    kind: Ingress
    metadata:
      name: kibana
      namespace: logging
    spec:
      rules:
      - host: kibana.boge.com
        http:
          paths:
          - path: /
            backend:
              serviceName: kibana
              servicePort: 5601
    EOF
    
    
    
    
    kubectl apply -f 03-kibana.6.8.13.yaml
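    
    Kibana is exposed through the ingress host kibana.boge.com, so the same hosts-entry approach as before applies (192.168.1.100 is the keepalived VIP):
    
    kubectl -n logging get pods,svc,ingress
    
    echo "192.168.1.100 kibana.boge.com" >> /etc/hosts
    curl -I http://kibana.boge.com/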
    

    4. Deploy log-pilot

    cat > 04-log-pilot.yml <<EOF
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: log-pilot
      namespace: logging
      labels:
        app: log-pilot
      # target namespace for the deployment
    
    spec:
      selector:
        matchLabels:
          app: log-pilot
      updateStrategy:
        type: RollingUpdate
      template:
        metadata:
          labels:
            app: log-pilot
          annotations:
            scheduler.alpha.kubernetes.io/critical-pod: ''
        spec:
          # Whether to allow scheduling onto master nodes
          #tolerations:
          #- key: node-role.kubernetes.io/master
          #  effect: NoSchedule
          containers:
          - name: log-pilot
            # For available versions, see https://github.com/AliyunContainerService/log-pilot/releases
            image: registry.cn-hangzhou.aliyuncs.com/acs/log-pilot:0.9.7-filebeat
            resources:
              limits:
                memory: 500Mi
              requests:
                cpu: 200m
                memory: 200Mi
            env:
              - name: "NODE_NAME"
                valueFrom:
                  fieldRef:
                    fieldPath: spec.nodeName
              ##--------------------------------
    #          - name: "LOGGING_OUTPUT"
    #            value: "logstash"
    #          - name: "LOGSTASH_HOST"
    #            value: "logstash-g1"
    #          - name: "LOGSTASH_PORT"
    #            value: "5044"
              ##--------------------------------
              - name: "LOGGING_OUTPUT"
                value: "elasticsearch"
              ## Make sure the cluster can reach the Elasticsearch endpoint
              - name: "ELASTICSEARCH_HOSTS"
                value: "elasticsearch:9200"
              ## Configure Elasticsearch credentials if required
              #- name: "ELASTICSEARCH_USER"
              #  value: "{es_username}"
              #- name: "ELASTICSEARCH_PASSWORD"
              #  value: "{es_password}"
              ##--------------------------------
              ## https://github.com/AliyunContainerService/log-pilot/blob/master/docs/filebeat/docs.md
              ## to file need configure 1
    #          - name: LOGGING_OUTPUT
    #            value: file
    #          - name: FILE_PATH
    #            value: /tmp
    #          - name: FILE_NAME
    #            value: filebeat.log
            volumeMounts:
            - name: sock
              mountPath: /var/run/docker.sock
            - name: root
              mountPath: /host
              readOnly: true
            - name: varlib
              mountPath: /var/lib/filebeat
            - name: varlog
              mountPath: /var/log/filebeat
            - name: localtime
              mountPath: /etc/localtime
              readOnly: true
             ## to file need configure 2
    #        - mountPath: /tmp
    #          name: mylog
            livenessProbe:
              failureThreshold: 3
              exec:
                command:
                - /pilot/healthz
              initialDelaySeconds: 10
              periodSeconds: 10
              successThreshold: 1
              timeoutSeconds: 2
            securityContext:
              capabilities:
                add:
                - SYS_ADMIN
          terminationGracePeriodSeconds: 30
          volumes:
          - name: sock
            hostPath:
              path: /var/run/docker.sock
          - name: root
            hostPath:
              path: /
          - name: varlib
            hostPath:
              path: /var/lib/filebeat
              type: DirectoryOrCreate
          - name: varlog
            hostPath:
              path: /var/log/filebeat
              type: DirectoryOrCreate
          - name: localtime
            hostPath:
              path: /etc/localtime
           ## to file need configure 3
    #      - hostPath:
    #          path: /tmp/mylog
    #          type: ""
    #        name: mylog
    EOF
    
    kubectl apply -f 04-log-pilot.yml
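    The DaemonSet above only collects logs from containers that opt in: log-pilot watches for environment variables prefixed with aliyun_logs_ on application containers, and the suffix becomes the Elasticsearch index prefix. Below is a minimal sketch (the tomcat-demo name and image are illustrative, not part of the original manual) that ships a container's stdout to the tomcat-access index used in the Kibana steps later:
    
    cat > tomcat-demo.yaml <<EOF
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: tomcat-demo
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: tomcat-demo
      template:
        metadata:
          labels:
            app: tomcat-demo
        spec:
          containers:
          - name: tomcat
            image: tomcat:8.5-jdk8
            ports:
            - containerPort: 8080
            env:
            # log-pilot picks this up; the suffix (tomcat-access) becomes the ES index prefix
            - name: aliyun_logs_tomcat-access
              value: "stdout"
    EOF
    
    kubectl apply -f tomcat-demo.yaml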
    
    
    

    4.1 URL configuration

    Add a hosts entry on the node used for access:
    echo "192.168.1.100 kibana.boge.com" >> /etc/hosts
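
    A quick reachability check from any node, assuming the ingress controller answers on 192.168.1.100:80 as implied by the hosts entry above:

    curl -s -o /dev/null -w "%{http_code}\n" -H "Host: kibana.boge.com" http://192.168.1.100/
    # an HTTP code such as 200 or 302 means the Ingress is routing requests to Kibana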

    5. Configure the Kibana UI

    Management > Index Patterns > Create index pattern

    # Create the index pattern
    Create index pattern > Index pattern (tomcat-access*) > Next step

    # Set the time field
    Time Filter field name (@timestamp) > Create index pattern

    # View the logs
    Discover > tomcat-access*
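
    The same index pattern can also be created without the UI through Kibana's saved-objects API (a sketch for Kibana 6.8, run from a pod inside the cluster so the kibana Service name resolves; the tomcat-access object id is illustrative):

    curl -s -X POST "http://kibana.logging.svc:5601/api/saved_objects/index-pattern/tomcat-access" \
      -H "kbn-xsrf: true" -H "Content-Type: application/json" \
      -d '{"attributes":{"title":"tomcat-access*","timeFieldName":"@timestamp"}}'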

    10. Prometheus Monitoring

    1. Import the offline images

    链接:https://pan.baidu.com/s/1DyMJPT8r_TUpI8Dr31SVew?pwd=m1bk 
    提取码:m1bk
    
    
    # Load the uploaded image tarballs
    sudo docker load -i alertmanager-v0.21.0.tar
    sudo docker load -i grafana-7.3.4.tar
    sudo docker load -i k8s-prometheus-adapter-v0.8.2.tar
    sudo docker load -i kube-rbac-proxy-v0.8.0.tar
    sudo docker load -i kube-state-metrics-v1.9.7.tar
    sudo docker load -i node-exporter-v1.0.1.tar
    sudo docker load -i prometheus-config-reloader-v0.43.2.tar
    sudo docker load -i prometheus_demo_service.tar
    sudo docker load -i prometheus-operator-v0.43.2.tar
    sudo docker load -i prometheus-v2.22.1.tar
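    
    # If all the tarballs sit in one directory, they can also be loaded in a single loop (a small convenience sketch):
    for f in ./*.tar; do sudo docker load -i "$f"; done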
    

    2. Create on the master node

    
    # Unpack the downloaded code package
    sudo unzip kube-prometheus-master.zip
    sudo rm -f kube-prometheus-master.zip && cd kube-prometheus-master
    
    
    # It is recommended to list the required images first, so all the offline Docker images can be collected in advance on a node with fast downloads
    find ./ -type f |xargs grep 'image: '|sort|uniq|awk '{print $3}'|grep ^[a-zA-Z]|grep -Evw 'error|kubeRbacProxy'|sort -rn|uniq
    
    
    kubectl create -f manifests/setup
    kubectl create -f manifests/
    
    
    
    # After a short while, check the result:
    kubectl -n monitoring get all
     
     
     
    # Note: to remove all the Prometheus services deployed above:
    # kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup
    

    3. Access the Prometheus UI

    # Change the prometheus-k8s Service type to NodePort so the UI can be reached from outside
    kubectl -n monitoring patch svc prometheus-k8s -p '{"spec":{"type":"NodePort"}}'
    
    # kubectl -n monitoring get svc prometheus-k8s 
    NAME             TYPE       CLUSTER-IP    EXTERNAL-IP   PORT(S)          AGE
    prometheus-k8s   NodePort   10.68.23.79   <none>        9090:22129/TCP   7m43s
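    
    # If you prefer not to change the Service type, a temporary port-forward works as well
    # (a sketch, run from any machine with kubectl access to the cluster):
    kubectl -n monitoring port-forward svc/prometheus-k8s 9090:9090 --address 0.0.0.0
    # then open http://<this-machine-ip>:9090 in a browser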
    

    3.1 Adjust the ClusterRole permissions

    #   kubectl edit clusterrole prometheus-k8s
    #------ original rules -------
    rules:
    - apiGroups:
      - ""
      resources:
      - nodes/metrics
      verbs:
      - get
    - nonResourceURLs:
      - /metrics
      verbs:
      - get
    #---------------------------
    # Updated ClusterRole (grants list/watch on nodes, services, endpoints and pods):
    
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: prometheus-k8s
    rules:
    - apiGroups:
      - ""
      resources:
      - nodes
      - services
      - endpoints
      - pods
      - nodes/proxy
      verbs:
      - get
      - list
      - watch
    - apiGroups:
      - ""
      resources:
      - configmaps
      - nodes/metrics
      verbs:
      - get
    - nonResourceURLs:
      - /metrics
      verbs:
      - get
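    
    After saving the edit, the extra permissions can be verified with impersonation (a quick check, assuming the kube-prometheus ServiceAccount prometheus-k8s in the monitoring namespace):

    kubectl auth can-i list pods --as=system:serviceaccount:monitoring:prometheus-k8s
    kubectl auth can-i get nodes/metrics --as=system:serviceaccount:monitoring:prometheus-k8s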
    

    4. Monitor ingress-nginx

    cat > servicemonitor.yaml <<EOF
    apiVersion: monitoring.coreos.com/v1
    kind: ServiceMonitor
    metadata:
      labels:
        app: ingress-nginx
      name: nginx-ingress-scraping
      namespace: ingress-nginx
    spec:
      endpoints:
      - interval: 30s
        path: /metrics
        port: metrics
      jobLabel: app
      namespaceSelector:
        matchNames:
        - ingress-nginx
      selector:
        matchLabels:
          app: ingress-nginx
    EOF
    
    
    kubectl apply -f servicemonitor.yaml
    
    
    kubectl -n ingress-nginx get servicemonitors.monitoring.coreos.com
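    
    The ServiceMonitor above scrapes a Service in the ingress-nginx namespace that carries the label app: ingress-nginx and exposes a port named metrics. If your ingress-nginx deployment does not already expose such a Service, the following sketch can be adapted (the label selector and the 10254 metrics port are assumptions that must match your actual controller pods):

    cat > ingress-nginx-metrics-svc.yaml <<EOF
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx-ingress-metrics
      namespace: ingress-nginx
      labels:
        app: ingress-nginx
    spec:
      selector:
        app: ingress-nginx        # assumption: must match the controller pod labels
      ports:
      - name: metrics
        port: 10254               # default ingress-nginx controller metrics port
        targetPort: 10254
    EOF

    kubectl apply -f ingress-nginx-metrics-svc.yaml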
    

    11. Install KubeSphere 3.3

    Official reference documentation

    https://kubesphere.com.cn/docs/v3.3/pluggable-components/alerting/

    1. KubeSphere requires a default StorageClass

    kubectl edit sc nfs-boge

      metadata:
        annotations:
          storageclass.beta.kubernetes.io/is-default-class: "true"
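    
    # The same annotation can be set with a one-liner instead of kubectl edit
    # (a sketch; newer clusters use the non-beta key storageclass.kubernetes.io/is-default-class):
    kubectl patch storageclass nfs-boge -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
    kubectl get sc   # the default class is marked with "(default)" after its name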
    

    2. Download the YAML files

    Your Kubernetes version must be v1.20.x, v1.21.x, *v1.22.x, *v1.23.x or *v1.24.x. On the versions marked with an asterisk, some edge-node features may not work; if you need edge nodes, v1.21.x is recommended.
    wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/kubesphere-installer.yaml
    wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/cluster-configuration.yaml
    
    
    wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/kubesphere-installer.yaml
    wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.2/cluster-configuration.yaml
    
    
    
    # Edit cluster-configuration.yaml
    # Under etcd, change endpointIps to the private IP address(es) of your master node(s).
    #endpointIps: XX.X.X.X
      etcd:
        monitoring: true       # Enable or disable etcd monitoring dashboard installation. You have to create a Secret for etcd before you enable it.
        endpointIps: '192.168.1.220,192.168.1.221,192.168.1.222'
    
    

    Reference configuration

    apiVersion: installer.kubesphere.io/v1alpha1
    kind: ClusterConfiguration
    metadata:
      name: ks-installer
      namespace: kubesphere-system
      labels:
        version: v3.1.1
    spec:
      persistence:
        storageClass: ""        # Keep the default here, since a default StorageClass has already been set
      authentication:
        jwtSecret: ""           # Keep the jwtSecret consistent with the Host Cluster. Retrieve the jwtSecret by executing "kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret" on the Host Cluster.
      local_registry: ""        # Add your private registry address if it is needed.
      etcd:
        monitoring: true       # Set to "true" to enable the etcd monitoring dashboard. You have to create a Secret for etcd before you enable it.
        endpointIps: 192.168.11.24  # Change to your own master node IP address(es)
        port: 2379              # etcd port.
        tlsEnable: true
      common:
        redis:
          enabled: true         # Set to "true" to enable Redis
        openldap:
          enabled: true         # Set to "true" to enable OpenLDAP (lightweight directory access protocol)
        minioVolumeSize: 20Gi # Minio PVC size.
        openldapVolumeSize: 2Gi   # openldap PVC size.
        redisVolumSize: 2Gi # Redis PVC size.
        monitoring:
          # type: external   # Whether to specify the external prometheus stack, and need to modify the endpoint at the next line.
          endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 # Prometheus endpoint to get metrics data.
        es:   # Storage backend for logging, events and auditing.
          # elasticsearchMasterReplicas: 1   # The total number of master nodes. Even numbers are not allowed.
          # elasticsearchDataReplicas: 1     # The total number of data nodes.
          elasticsearchMasterVolumeSize: 4Gi   # The volume size of Elasticsearch master nodes.
          elasticsearchDataVolumeSize: 20Gi    # The volume size of Elasticsearch data nodes.
          logMaxAge: 7                     # Log retention time in built-in Elasticsearch. It is 7 days by default.
          elkPrefix: logstash              # The string making up index names. The index name will be formatted as ks--log.
          basicAuth:
            enabled: false
            username: ""
            password: ""
          externalElasticsearchUrl: ""
          externalElasticsearchPort: ""
      console:
        enableMultiLogin: true  # Enable or disable simultaneous logins. It allows different users to log in with the same account at the same time.
        port: 30880
      alerting:                # (CPU: 0.1 Core, Memory: 100 MiB) It enables users to customize alerting policies to send messages to receivers in time with different time intervals and alerting levels to choose from.
        enabled: true         # Set to "true" to enable alerting
        # thanosruler:
        #   replicas: 1
        #   resources: {}
      auditing:                # Provide a security-relevant chronological set of records,recording the sequence of activities happening on the platform, initiated by different tenants.
        enabled: true         # Set to "true" to enable auditing
      devops:                  # (CPU: 0.47 Core, Memory: 8.6 G) Provide an out-of-the-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
        enabled: true             # Set to "true" to enable DevOps
        jenkinsMemoryLim: 2Gi      # Jenkins memory limit.
        jenkinsMemoryReq: 1500Mi   # Jenkins memory request.
        jenkinsVolumeSize: 8Gi     # Jenkins volume size.
        jenkinsJavaOpts_Xms: 512m  # The following three fields are JVM parameters.
        jenkinsJavaOpts_Xmx: 512m
        jenkinsJavaOpts_MaxRAM: 2g
      events:                  # Provide a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
        enabled: true         # Set to "true" to enable cluster events
        ruler:
          enabled: true
          replicas: 2
      logging:                 # (CPU: 57 m, Memory: 2.76 G) Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
        enabled: true        # Set to "true" to enable logging
        logsidecar:
          enabled: true
          replicas: 2
      metrics_server:                    # (CPU: 56 m, Memory: 44.35 MiB) It enables HPA (Horizontal Pod Autoscaler).
        enabled: false                   # Leave as-is: metrics-server was installed earlier; enabling it here pulls the upstream image and will likely fail
      monitoring:
        storageClass: ""                 # If there is an independent StorageClass you need for Prometheus, you can specify it here. The default StorageClass is used by default.
        # prometheusReplicas: 1          # Prometheus replicas are responsible for monitoring different segments of data source and providing high availability.
        prometheusMemoryRequest: 400Mi   # Prometheus request memory.
        prometheusVolumeSize: 20Gi       # Prometheus PVC size.
        # alertmanagerReplicas: 1          # AlertManager Replicas.
      multicluster:
        clusterRole: none  # host | member | none  # You can install a solo cluster, or specify it as the Host or Member Cluster.
      network:
        networkpolicy: # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods).
          # Make sure that the CNI network plugin used by the cluster supports NetworkPolicy. There are a number of CNI network plugins that support NetworkPolicy, including Calico, Cilium, Kube-router, Romana and Weave Net.
          enabled: true # Set to "true" to enable network policies
        ippool: # Use Pod IP Pools to manage the Pod network address space. Pods to be created can be assigned IP addresses from a Pod IP Pool.
          type: none # If your CNI plugin is Calico, change this to "calico"; Flannel is used here, so keep the default
        topology: # Use Service Topology to view Service-to-Service communication based on Weave Scope.
          type: none # Specify "weave-scope" for this field to enable Service Topology. "none" means that Service Topology is disabled.
      openpitrix: # An App Store that is accessible to all platform tenants. You can use it to manage apps across their entire lifecycle.
        store:
          enabled: true # Set to "true" to enable the App Store
      servicemesh:         # (0.3 Core, 300 MiB) Provide fine-grained traffic management, observability and tracing, and visualized traffic topology.
        enabled: true     # Set to "true" to enable service mesh (microservice governance)
      kubeedge:          # Add edge nodes to your cluster and deploy workloads on edge nodes.
        enabled: false   # Leave disabled; this is the edge service, and this cluster has no edge devices
        cloudCore:
          nodeSelector: {"node-role.kubernetes.io/worker": ""}
          tolerations: []
          cloudhubPort: "10000"
          cloudhubQuicPort: "10001"
          cloudhubHttpsPort: "10002"
          cloudstreamPort: "10003"
          tunnelPort: "10004"
          cloudHub:
            advertiseAddress: # At least a public IP address or an IP address which can be accessed by edge nodes must be provided.
              - ""            # Note that once KubeEdge is enabled, CloudCore will malfunction if the address is not provided.
            nodeLimit: "100"
          service:
            cloudhubNodePort: "30000"
            cloudhubQuicNodePort: "30001"
            cloudhubHttpsNodePort: "30002"
            cloudstreamNodePort: "30003"
            tunnelNodePort: "30004"
        edgeWatcher:
          nodeSelector: {"node-role.kubernetes.io/worker": ""}
          tolerations: []
          edgeWatcherAgent:
            nodeSelector: {"node-role.kubernetes.io/worker": ""}
            tolerations: []
            
    
    

    3. Apply the YAML files

    kubectl apply -f kubesphere-installer.yaml
    
    kubectl apply -f cluster-configuration.yaml
    
    

    4. Check the installer logs

    kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
    
    
    # Access port 30880 on any node
    # Account : admin
    # Password : P@88w0rd
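    
    # To confirm which NodePort the console is actually exposed on (30880 is the default), check the ks-console Service:
    kubectl get svc ks-console -n kubesphere-system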
    

    5. Fix the missing etcd monitoring certificates

    kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs  \
    --from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt  \
    --from-file=etcd-client.crt=/etc/kubernetes/pki/etcd/healthcheck-client.crt  \
    --from-file=etcd-client.key=/etc/kubernetes/pki/etcd/healthcheck-client.key
    

    6. Enable alerting after installation

    In cluster-configuration.yaml, search for alerting and change enabled from false to true to enable the alerting system. Save the file when done.

    alerting:
      enabled: true # Change "false" to "true".
    
    
    # Apply the changes
    kubectl apply -f kubesphere-installer.yaml
    
    kubectl apply -f cluster-configuration.yaml
    

    6.0 Configure DingTalk alerts

    6.1 DingTalk custom robot configuration

    Add a custom robot; in the security settings, check **Sign (加签)**.

    6.2 Steps

    Top left > Platform Management > Platform Settings > Notification Management > Notification Configuration > DingTalk > Chatbot Configuration
    Toggle on - Enabled
    Fill in your Webhook URL
    Fill in your secret (the signing key)
    Send a test message
    Confirm

    Check the DingTalk group to confirm the test message was delivered.

    7. Enable the App Store after installation

    In the same YAML file, search for openpitrix and change enabled from false to true. When done, click OK in the bottom-right corner to save the configuration.

    openpitrix:
      store:
        enabled: true # Change "false" to "true".
    
    
    # Apply the changes
    kubectl apply -f kubesphere-installer.yaml
    
    kubectl apply -f cluster-configuration.yaml
    

    8. Enable the service mesh (Istio) after installation

    In the same configuration file, search for servicemesh and change enabled from false to true. When done, click OK in the bottom-right corner to save the configuration.

    servicemesh:
      enabled: true # Change "false" to "true".
      istio: # Customizing the istio installation configuration, refer to https://istio.io/latest/docs/setup/additional-setup/customize-installation/
        components:
          ingressGateways:
          - name: istio-ingressgateway # Exposes services outside of the service mesh. Disabled by default.
            enabled: false
          cni:
            enabled: false # When enabled, Istio's pod traffic redirection is set up during the network-setup phase of the Kubernetes pod lifecycle.
    

    9. Enable DevOps before installation

    In the YAML file, search for devops and change enabled from false to true. When done, click OK in the bottom-right corner to save the configuration.

    devops:
      enabled: true # Change "false" to "true".
    

    10. Uninstalling KubeSphere

    
    kubectl delete -f cluster-configuration.yaml --force
    kubectl delete -f kubesphere-installer.yaml --force
    

    # Remove leftover resources
    vi del.sh

    #!/usr/bin/env bash
     
    function delete_sure(){
      cat << eof
    $(echo -e "\033[1;36mNote:\033[0m")
    Delete the KubeSphere cluster, including the module kubesphere-system kubesphere-devops-system kubesphere-devops-worker kubesphere-monitoring-system kubesphere-logging-system openpitrix-system.
    eof
     
    read -p "Please reconfirm that you want to delete the KubeSphere cluster.  (yes/no) " ans
    while [[ "x"$ans != "xyes" && "x"$ans != "xno" ]]; do
        read -p "Please reconfirm that you want to delete the KubeSphere cluster.  (yes/no) " ans
    done
     
    if [[ "x"$ans == "xno" ]]; then
        exit
    fi
    }
     
     
    delete_sure
     
    # delete ks-installer
    kubectl delete deploy ks-installer -n kubesphere-system 2>/dev/null
     
    # delete helm
    for namespaces in kubesphere-system kubesphere-devops-system kubesphere-monitoring-system kubesphere-logging-system openpitrix-system kubesphere-monitoring-federated
    do
      helm list -n $namespaces | grep -v NAME | awk '{print $1}' | sort -u | xargs -r -L1 helm uninstall -n $namespaces 2>/dev/null
    done
     
    # delete kubefed
    kubectl get cc -n kubesphere-system ks-installer -o jsonpath="{.status.multicluster}" | grep enable
    if [[ $? -eq 0 ]]; then
      # delete kubefed types resources
      for kubefed in `kubectl api-resources --namespaced=true --api-group=types.kubefed.io -o name`
      do
        kubectl delete -n kube-federation-system $kubefed --all 2>/dev/null
      done
      for kubefed in `kubectl api-resources --namespaced=false --api-group=types.kubefed.io -o name`
      do
        kubectl delete $kubefed --all 2>/dev/null
      done
      # delete kubefed core resources
      for kubefed in `kubectl api-resources --namespaced=true --api-group=core.kubefed.io -o name`
      do
        kubectl delete -n kube-federation-system $kubefed --all 2>/dev/null
      done
      for kubefed in `kubectl api-resources --namespaced=false --api-group=core.kubefed.io -o name`
      do
        kubectl delete $kubefed --all 2>/dev/null
      done
      # uninstall kubefed chart
      helm uninstall -n kube-federation-system kubefed 2>/dev/null
    fi
     
     
    helm uninstall -n kube-system snapshot-controller 2>/dev/null
     
    # delete kubesphere deployment & statefulset
    kubectl delete deployment -n kubesphere-system `kubectl get deployment -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
    kubectl delete statefulset -n kubesphere-system `kubectl get statefulset -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
     
    # delete monitor resources
    kubectl delete prometheus -n kubesphere-monitoring-system k8s 2>/dev/null
    kubectl delete Alertmanager -n kubesphere-monitoring-system main 2>/dev/null
    kubectl delete DaemonSet -n kubesphere-monitoring-system node-exporter 2>/dev/null
    kubectl delete statefulset -n kubesphere-monitoring-system `kubectl get statefulset -n kubesphere-monitoring-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
     
    # delete grafana
    kubectl delete deployment -n kubesphere-monitoring-system grafana 2>/dev/null
    kubectl --no-headers=true get pvc -n kubesphere-monitoring-system -o custom-columns=:metadata.namespace,:metadata.name | grep -E kubesphere-monitoring-system | xargs -n2 kubectl delete pvc -n 2>/dev/null
     
    # delete pvc
    pvcs="kubesphere-system|openpitrix-system|kubesphere-devops-system|kubesphere-logging-system"
    kubectl --no-headers=true get pvc --all-namespaces -o custom-columns=:metadata.namespace,:metadata.name | grep -E $pvcs | xargs -n2 kubectl delete pvc -n 2>/dev/null
     
     
    # delete rolebindings
    delete_role_bindings() {
      for rolebinding in `kubectl -n $1 get rolebindings -l iam.kubesphere.io/user-ref -o jsonpath="{.items[*].metadata.name}"`
      do
        kubectl -n $1 delete rolebinding $rolebinding 2>/dev/null
      done
    }
     
    # delete roles
    delete_roles() {
      kubectl -n $1 delete role admin 2>/dev/null
      kubectl -n $1 delete role operator 2>/dev/null
      kubectl -n $1 delete role viewer 2>/dev/null
      for role in `kubectl -n $1 get roles -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
      do
        kubectl -n $1 delete role $role 2>/dev/null
      done
    }
     
    # remove useless labels and finalizers
    for ns in `kubectl get ns -o jsonpath="{.items[*].metadata.name}"`
    do
      kubectl label ns $ns kubesphere.io/workspace-
      kubectl label ns $ns kubesphere.io/namespace-
      kubectl patch ns $ns -p '{"metadata":{"finalizers":null,"ownerReferences":null}}'
      delete_role_bindings $ns
      delete_roles $ns
    done
     
    # delete clusterroles
    delete_cluster_roles() {
      for role in `kubectl get clusterrole -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
      do
        kubectl delete clusterrole $role 2>/dev/null
      done
     
      for role in `kubectl get clusterroles | grep "kubesphere" | awk '{print $1}'| paste -sd " "`
      do
        kubectl delete clusterrole $role 2>/dev/null
      done
    }
    delete_cluster_roles
     
    # delete clusterrolebindings
    delete_cluster_role_bindings() {
      for rolebinding in `kubectl get clusterrolebindings -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
      do
        kubectl delete clusterrolebindings $rolebinding 2>/dev/null
      done
     
      for rolebinding in `kubectl get clusterrolebindings | grep "kubesphere" | awk '{print $1}'| paste -sd " "`
      do
        kubectl delete clusterrolebindings $rolebinding 2>/dev/null
      done
    }
    delete_cluster_role_bindings
     
    # delete clusters
    for cluster in `kubectl get clusters -o jsonpath="{.items[*].metadata.name}"`
    do
      kubectl patch cluster $cluster -p '{"metadata":{"finalizers":null}}' --type=merge
    done
    kubectl delete clusters --all 2>/dev/null
     
    # delete workspaces
    for ws in `kubectl get workspaces -o jsonpath="{.items[*].metadata.name}"`
    do
      kubectl patch workspace $ws -p '{"metadata":{"finalizers":null}}' --type=merge
    done
    kubectl delete workspaces --all 2>/dev/null
     
    # make DevOps CRs deletable
    for devops_crd in $(kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io"); do
        for ns in $(kubectl get ns -ojsonpath='{.items..metadata.name}'); do
            for devops_res in $(kubectl get $devops_crd -n $ns -oname); do
                kubectl patch $devops_res -n $ns -p '{"metadata":{"finalizers":[]}}' --type=merge
            done
        done
    done
     
    # delete validatingwebhookconfigurations
    for webhook in ks-events-admission-validate users.iam.kubesphere.io network.kubesphere.io validating-webhook-configuration resourcesquotas.quota.kubesphere.io
    do
      kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
    done
     
    # delete mutatingwebhookconfigurations
    for webhook in ks-events-admission-mutate logsidecar-injector-admission-mutate mutating-webhook-configuration
    do
      kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
    done
     
    # delete users
    for user in `kubectl get users -o jsonpath="{.items[*].metadata.name}"`
    do
      kubectl patch user $user -p '{"metadata":{"finalizers":null}}' --type=merge
    done
    kubectl delete users --all 2>/dev/null
     
     
    # delete helm resources
    for resource_type in `echo helmcategories helmapplications helmapplicationversions helmrepos helmreleases`; do
      for resource_name in `kubectl get ${resource_type}.application.kubesphere.io -o jsonpath="{.items[*].metadata.name}"`; do
        kubectl patch ${resource_type}.application.kubesphere.io ${resource_name} -p '{"metadata":{"finalizers":null}}' --type=merge
      done
      kubectl delete ${resource_type}.application.kubesphere.io --all 2>/dev/null
    done
     
    # delete workspacetemplates
    for workspacetemplate in `kubectl get workspacetemplates.tenant.kubesphere.io -o jsonpath="{.items[*].metadata.name}"`
    do
      kubectl patch workspacetemplates.tenant.kubesphere.io $workspacetemplate -p '{"metadata":{"finalizers":null}}' --type=merge
    done
    kubectl delete workspacetemplates.tenant.kubesphere.io --all 2>/dev/null
     
    # delete federatednamespaces in namespace kubesphere-monitoring-federated
    for resource in $(kubectl get federatednamespaces.types.kubefed.io -n kubesphere-monitoring-federated -oname); do
      kubectl patch "${resource}" -p '{"metadata":{"finalizers":null}}' --type=merge -n kubesphere-monitoring-federated
    done
     
    # delete crds
    for crd in `kubectl get crds -o jsonpath="{.items[*].metadata.name}"`
    do
      if [[ $crd == *kubesphere.io ]] || [[ $crd == *kubefed.io ]] ; then kubectl delete crd $crd 2>/dev/null; fi
    done
     
    # delete relevance ns
    for ns in kube-federation-system kubesphere-alerting-system kubesphere-controls-system kubesphere-devops-system kubesphere-devops-worker kubesphere-logging-system kubesphere-monitoring-system kubesphere-monitoring-federated openpitrix-system kubesphere-system
    do
      kubectl delete ns $ns 2>/dev/null
    done
    

    # Run the deletion script
    sh del.sh

    12. GitLab Installation

    1. Prepare a dedicated server and install with Docker

    docker search gitlab
    docker pull gitlab/gitlab-ce
    

    2. Prepare the docker-compose.yml file

    
    mkdir -p /data/git
    
    vim /data/git/docker-compose.yml
    
    version: '3.1'
    services:
      gitlab:
        image: 'gitlab/gitlab-ce:latest'
        container_name: gitlab
        restart: always
        environment:
          GITLAB_OMNIBUS_CONFIG: |
            external_url 'http://10.1.100.225:8929' # IP of the server where GitLab is installed
            gitlab_rails['gitlab_shell_ssh_port'] = 2224
        ports:
          - '8929:8929'
          - '2224:2224'
        volumes:
          - './config:/etc/gitlab'
          - './logs:/var/log/gitlab'
          - './data:/var/opt/gitlab'
    
    • 1
    • 2
    • 3
    • 4
    • 5
    • 6
    • 7
    • 8
    • 9
    • 10
    • 11
    • 12
    • 13
    • 14
    • 15
    • 16
    • 17
    • 18
    • 19
    • 20
    • 21
    • 22

    3. Start the container (this takes quite a while...)

    cd /data/git
    docker-compose up -d
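    
    # To follow the startup progress and confirm all GitLab services are up, the following checks can help
    # (gitlab-ctl ships inside the official image):
    docker logs -f gitlab                        # watch the startup logs (Ctrl+C to stop)
    docker exec -it gitlab gitlab-ctl status     # every service should show "run:"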
    

    4. Access the GitLab home page

    http://10.1.100.225:8929

    5. View the initial root password

    docker exec -it gitlab cat /etc/gitlab/initial_root_password
    

    6. On first login, change the password

    Top right corner > Administrator > Preferences > Password

    13. DevOps Environment Initialization

    1. Install Jenkins, JDK and Maven on Linux

    1. Download links

    JDK download:
    https://www.oracle.com/java/technologies/downloads/

    Maven download:
    https://maven.apache.org/download.cgi

    2. Install JDK and Maven

    tar -zxvf jdk-8*.tar.gz -C /usr/local/
    tar -zxvf apache-maven-*.tar.gz -C /usr/local/
    
    cd /usr/local
    mv apache-maven*/ maven
    mv jdk1.8*/ jdk
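    
    # If java and mvn should also be usable directly on this host (not only inside Jenkins),
    # a sketch of the usual profile setup, assuming the /usr/local/jdk and /usr/local/maven paths created above:
    cat >> /etc/profile <<'EOF'
    export JAVA_HOME=/usr/local/jdk
    export MAVEN_HOME=/usr/local/maven
    export PATH=$PATH:$JAVA_HOME/bin:$MAVEN_HOME/bin
    EOF
    source /etc/profile
    java -version && mvn -v   # quick sanity check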
    
    2.1 Edit the Maven configuration

    vim /usr/local/maven/conf/settings.xml

     
    <!-- Maven: Aliyun mirror; insert around line 160 -->
    <mirror>
        <id>nexus-aliyun</id>
        <mirrorOf>central</mirrorOf>
        <name>Nexus aliyun</name>
        <url>http://maven.aliyun.com/nexus/content/groups/public</url>
    </mirror>
     
     
    <!-- Maven: JDK 1.8 profile; insert around line 252 -->
    <profile>    
         <id>jdk1.8</id>    
         <activation>    
             <activeByDefault>true</activeByDefault>    
             <jdk>1.8</jdk>    
        </activation>    
        <properties>    
        	<maven.compiler.source>1.8</maven.compiler.source>    
        	<maven.compiler.target>1.8</maven.compiler.target>    
            <maven.compiler.compilerVersion>1.8</maven.compiler.compilerVersion>    
        </properties>     
    </profile> 
     
    <!-- Maven: activate the JDK 1.8 profile; insert around line 257 -->
       <activeProfiles>
          <activeProfile>jdk1.8</activeProfile>
       </activeProfiles>
    

    3. Install Jenkins

    3.1 Pull the image

    docker pull jenkins/jenkins:2.319.1-lts

    3.2 Create the compose file

    mkdir -p /data/jenkins/
    cd /data/jenkins/
    vim /data/jenkins/docker-compose.yml

    version: "3.1"
    services:
      jenkins:
        image: jenkins/jenkins:2.319.1-lts   # match the tag pulled above
        container_name: jenkins
        ports:
          - 8080:8080
          - 50000:50000
        volumes:
          - ./data/:/var/jenkins_home/
          - /var/run/docker.sock:/var/run/docker.sock
          - /usr/bin/docker:/usr/bin/docker
          - /etc/docker/daemon.json:/etc/docker/daemon.json
    
    3.3 Start Jenkins

    # Adjust permissions so the Jenkins container can use the Docker socket
    cd /var/run

    chown root:root docker.sock

    # give other users read and write access
    chmod o+rw docker.sock


    cd /data/jenkins/
    docker-compose up -d

    # grant permissions on the data directory
    chmod 777 /data/jenkins/data/

    cat /data/jenkins/data/hudson.model.UpdateCenter.xml
    # After the Jenkins container starts it downloads a lot of content, and the default
    # update site is slow, so switch it to a faster mirror:
    # edit hudson.model.UpdateCenter.xml in the data volume and replace the URL with
    # http://mirror.esuni.jp/jenkins/updates/update-center.json

    # The Tsinghua University plugin mirror also works:
    # https://mirrors.tuna.tsinghua.edu.cn/jenkins/updates/update-center.json

    # restart
    docker-compose restart

    # check the logs
    docker logs -f jenkins
    

    3.4 Open the web UI and install plugins

    http://10.1.100.225:8080
    1. Enter the initial password  2. Select plugins to install  3. Click Install

    4. Install Jenkins plugins

    Manage Jenkins > Manage Plugins > Available > search for the plugin
    (Chinese UI: 系统管理 > 插件管理 > 可选插件)
    Locale
    Localization
    Git Parameter
    Publish Over SSH
    

    5. Configure Jenkins

    mv /usr/local/maven/ /data/jenkins/data/
    mv /usr/local/jdk/ /data/jenkins/data/

    5.1 Register the local JDK

    Dashboard > Manage Jenkins > Global Tool Configuration > Add JDK > uncheck (√) Install automatically
    NAME

    jdk8
    

    JAVA_HOME

    /var/jenkins_home/jdk/
    
    5.2 Register the local Maven

    Dashboard > Manage Jenkins > Global Tool Configuration > Add Maven > uncheck (√) Install automatically
    NAME

    maven
    

    MAVEN_HOME

    /var/jenkins_home/maven/
    

    Save and Apply

    Run a quick Maven test:
    mvn help:system

    6. Jenkins Publish over SSH test

    Manage Jenkins > Configure System > Publish over SSH > SSH Servers > Add

    # custom server name
    Name

    test
    

    # host IP
    Hostname

    10.1.100.25
    

    # host username
    Username

    root
    

    # remote directory for the project
    Remote Directory

    /data/work/mytest
    

    Click Advanced
    √ Use password authentication, or use a different key

    # enter the server password
    Passphrase / Password

    xxxx
    

    # click Test

    Test Configuration


    Save and Apply
    

    7. Set up passwordless SSH from the Jenkins server to the k8s-master server

    # On the Jenkins server, enter the Jenkins container
    docker exec -it jenkins bash

    # Inside the container, generate an SSH key pair; press Enter at each prompt
    ssh-keygen -t rsa

    # Inside the container, view the Jenkins public key
    cat /var/jenkins_home/.ssh/id_rsa.pub

    # On the k8s-master server, append the Jenkins public key to authorized_keys
    echo "xxxxxx" >> /root/.ssh/authorized_keys
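
    A quick way to confirm that the passwordless login works, run from inside the Jenkins container (assuming the master is k8s-master01 at 192.168.1.220 as configured earlier; the first connection also records the host key):

    ssh -o StrictHostKeyChecking=no root@192.168.1.220 "hostname && kubectl get nodes"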

    14. Development Environment: IDEA

    Tool downloads:

    链接:https://pan.baidu.com/s/1Jkyh_kgrT2o388Xiujbdeg?pwd=b7rx
    提取码:b7rx

    1. Configure Maven and JDK on Windows

    https://blog.csdn.net/weixin_46565024/article/details/122758111

    2. Create a simple project in IDEA

    File > New > Project
    
    Spring Initializr > Next
    
    Type (select Maven) > Java Version (select 8) > Next
    
    Web > check √ Spring Web > Next > Finish
    

    Error when creating the project: Error:java: invalid source release: 12

    Solution: https://www.cnblogs.com/bJydsJ/p/16699429.html

  • Original article: https://blog.csdn.net/qq_35583325/article/details/132719033