1. Cluster Types
Kubernetes clusters broadly fall into two categories: single-master and multi-master.
One master, many nodes: a single master node with multiple worker nodes. Simple to set up, but the master is a single point of failure; suitable for test environments.
Many masters, many nodes: multiple master nodes with multiple worker nodes. More involved to set up, but highly available; suitable for production environments.
2. Installation Methods
Kubernetes can be deployed in several ways; the mainstream options today are kubeadm, Minikube, and binary packages.
1. Minikube: a tool for quickly standing up a single-node Kubernetes instance.
2. Kubeadm: a tool for quickly bootstrapping a Kubernetes cluster, https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm/
3. Binary packages: download each component's binary from the official site and install them one by one. This approach is more instructive for understanding the Kubernetes components, https://github.com/kubernetes/kubernetes
Note: we need a Kubernetes cluster environment but don't want too much hassle, so we choose the kubeadm approach.
Role    IP address       Components
master:192.168.78.130 docker,kubectl,kubeadm,kubelet
node1: 192.168.78.131 docker,kubectl,kubeadm,kubelet
node2: 192.168.78.132 docker,kubectl,kubeadm,kubelet
When installing the virtual machines, pay attention to the following settings:
1. OS environment: 2 CPUs, 2 GB RAM, 50 GB disk, CentOS 7+
2. Language: Simplified Chinese / English
3. Software selection: Infrastructure Server
4. Partitioning: automatic / manual
5. Network configuration: set the address information as follows
IP address: 192.168.78.(130, 131, 132)
Netmask: 255.255.255.0
Default gateway: 192.168.78.2
DNS: 8.8.8.8
6. Hostnames:
Master node: master
Node: node1
Node: node2
This installation method requires CentOS 7 or later; here all three hosts run CentOS Stream 8.
[root@master ~]# cat /etc/redhat-release
CentOS Stream release 8
[root@node1 ~]# cat /etc/redhat-release
CentOS Stream release 8
[root@node2 ~]# cat /etc/redhat-release
CentOS Stream release 8
To make it easy for cluster nodes to reach each other by name, configure hostname resolution:
[root@master ~]# vim /etc/hosts
192.168.78.130 master.example.com master
192.168.78.131 node1.example.com node1
192.168.78.132 node2.example.com node2
Kubernetes requires that the clocks of all cluster nodes be precisely synchronized. Here the chronyd service is used: the master serves time locally (local stratum 10) and the nodes sync from the master.
[root@master ~]# vim /etc/chrony.conf
local stratum 10
[root@master ~]# systemctl restart chronyd
[root@master ~]# systemctl enable chronyd
[root@master ~]# hwclock -w
[root@node1 ~]# vim /etc/chrony.conf
server master.example.com iburst
[root@node1 ~]# systemctl restart chronyd
[root@node1 ~]# systemctl enable chronyd
[root@node1 ~]# hwclock -w
[root@node2 ~]# vim /etc/chrony.conf
server master.example.com iburst
[root@node2 ~]# systemctl restart chronyd
[root@node2 ~]# systemctl enable chronyd
[root@node2 ~]# hwclock -w
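Note that by default chronyd does not answer NTP clients, so the master's /etc/chrony.conf typically also needs an allow directive for the node subnet (the subnet below is an assumption matching this environment):
[root@master ~]# echo 'allow 192.168.78.0/24' >> /etc/chrony.conf
[root@master ~]# systemctl restart chronyd
Afterwards, you can verify on each node that it is actually syncing from the master (a quick check, not in the original walkthrough):
[root@node1 ~]# chronyc sources -v   # master.example.com should be marked ^* once selected as the sync source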
Disable firewalld, SELinux, and postfix — configure all 3 hosts.
[root@master ~]# systemctl stop firewalld
[root@master ~]# systemctl disable firewalld
[root@master ~]# vim /etc/selinux/config
SELINUX=disabled
[root@node1 ~]# systemctl stop firewalld
[root@node1 ~]# systemctl disable firewalld
[root@node1 ~]# vim /etc/selinux/config
SELINUX=disabled
[root@node2 ~]# systemctl stop firewalld
[root@node2 ~]# systemctl disable firewalld
[root@node2 ~]# vim /etc/selinux/config
SELINUX=disabled
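Note that SELINUX=disabled in /etc/selinux/config only takes effect after a reboot. To also switch SELinux off for the currently running session (a common companion step, not in the original), run on each host:
[root@master ~]# setenforce 0   # permissive mode for the current boot
[root@master ~]# getenforce
Permissive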
[root@master ~]# systemctl stop postfix
[root@master ~]# systemctl disable postfix
[root@node1 ~]# systemctl stop postfix
[root@node1 ~]# systemctl disable postfix
[root@node2 ~]# systemctl stop postfix
[root@node2 ~]# systemctl disable postfix
Disable swap on all three hosts:
vim /etc/fstab   # comment out the swap partition line
swapoff -a
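If you prefer not to open an editor, an equivalent sketch using sed (assumption: the swap entry in /etc/fstab is a single uncommented line containing the word swap):
[root@master ~]# sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab   # comment out the swap line
[root@master ~]# swapoff -a
[root@master ~]# free -h | grep -i swap   # all swap figures should now read 0B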
[root@master ~]# vim /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@master ~]# modprobe br_netfilter
[root@master ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@node1 ~]# vim /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@node1 ~]# modprobe br_netfilter
[root@node1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@node2 ~]# vim /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@node2 ~]# modprobe br_netfilter
[root@node2 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
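One caveat: modprobe only loads br_netfilter for the current boot, and the nodes are rebooted further below, after which the bridge sysctls above cannot be applied. A minimal sketch of making the module load persistent via systemd's modules-load mechanism (the file name k8s.conf is arbitrary), to be run on all three hosts:
[root@master ~]# cat > /etc/modules-load.d/k8s.conf << EOF
> br_netfilter
> EOF
# systemd-modules-load reads this directory at boot, so br_netfilter is loaded
# before systemd-sysctl applies /etc/sysctl.d/k8s.conf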
[root@master ~]# vim /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
[root@master ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
[root@master ~]# bash /etc/sysconfig/modules/ipvs.modules
[root@master ~]# lsmod | grep -e ip_vs
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 172032 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 172032 1 ip_vs
nf_defrag_ipv6 20480 2 nf_conntrack,ip_vs
libcrc32c 16384 3 nf_conntrack,xfs,ip_vs
[root@master ~]# reboot
[root@node1 ~]# vim /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
[root@node1 ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
[root@node1 ~]# bash /etc/sysconfig/modules/ipvs.modules
[root@node1 ~]# lsmod | grep -e ip_vs
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 172032 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 172032 1 ip_vs
nf_defrag_ipv6 20480 2 nf_conntrack,ip_vs
libcrc32c 16384 3 nf_conntrack,xfs,ip_vs
[root@node1 ~]# reboot
[root@node2 ~]# vim /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
[root@node2 ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
[root@node2 ~]# bash /etc/sysconfig/modules/ipvs.modules
[root@node2 ~]# lsmod | grep -e ip_vs
ip_vs_sh 16384 0
ip_vs_wrr 16384 0
ip_vs_rr 16384 0
ip_vs 172032 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 172032 1 ip_vs
nf_defrag_ipv6 20480 2 nf_conntrack,ip_vs
libcrc32c 16384 3 nf_conntrack,xfs,ip_vs
[root@node2 ~]# reboot
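Note that on a systemd-based distribution such as CentOS Stream 8, scripts under /etc/sysconfig/modules/ are typically not executed automatically at boot, so the IPVS modules may be gone after the reboot. A hedged alternative is to declare them via modules-load.d instead (file name ipvs.conf is arbitrary), on all three hosts:
[root@master ~]# cat > /etc/modules-load.d/ipvs.conf << EOF
> ip_vs
> ip_vs_rr
> ip_vs_wrr
> ip_vs_sh
> EOF
# systemd-modules-load loads the listed modules automatically on every boot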
[root@master yum.repos.d]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
[root@master yum.repos.d]# dnf -y install epel-release
[root@master yum.repos.d]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@node1 yum.repos.d]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
[root@node1 yum.repos.d]# dnf -y install epel-release
[root@node1 yum.repos.d]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@node2 yum.repos.d]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
[root@node2 yum.repos.d]# dnf -y install epel-release
[root@node2 yum.repos.d]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@master ~]# dnf -y install docker-ce --allowerasing
[root@master ~]# systemctl restart docker
[root@master ~]# systemctl enable docker
[root@node1 ~]# dnf -y install docker-ce --allowerasing
[root@node1 ~]# systemctl restart docker
[root@node1 ~]# systemctl enable docker
[root@node2 ~]# dnf -y install docker-ce --allowerasing
[root@node2 ~]# systemctl restart docker
[root@node2 ~]# systemctl enable docker
[root@master ~]# cat > /etc/docker/daemon.json << EOF
> {
> "registry-mirrors": ["https://14lrk6zd.mirror.aliyuncs.com"],
> "exec-opts": ["native.cgroupdriver=systemd"],
> "log-driver": "json-file",
> "log-opts": {
> "max-size": "100m"
> },
> "storage-driver": "overlay2"
> }
> EOF
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart docker
[root@node1 ~]# cat > /etc/docker/daemon.json << EOF
> {
> "registry-mirrors": ["https://14lrk6zd.mirror.aliyuncs.com"],
> "exec-opts": ["native.cgroupdriver=systemd"],
> "log-driver": "json-file",
> "log-opts": {
> "max-size": "100m"
> },
> "storage-driver": "overlay2"
> }
> EOF
[root@node1 ~]# systemctl daemon-reload
[root@node1 ~]# systemctl restart docker
[root@node2 ~]# cat > /etc/docker/daemon.json << EOF
> {
> "registry-mirrors": ["https://14lrk6zd.mirror.aliyuncs.com"],
> "exec-opts": ["native.cgroupdriver=systemd"],
> "log-driver": "json-file",
> "log-opts": {
> "max-size": "100m"
> },
> "storage-driver": "overlay2"
> }
> EOF
[root@node2 ~]# systemctl daemon-reload
[root@node2 ~]# systemctl restart docker
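Since kubeadm expects the kubelet and the container runtime to agree on the systemd cgroup driver, it is worth confirming that daemon.json took effect (a quick verification, not in the original steps):
[root@master ~]# docker info | grep -i 'cgroup driver'
 Cgroup Driver: systemd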
[root@master ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@node1 ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@node2 ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@master ~]# dnf -y install kubeadm kubelet kubectl
[root@master ~]# systemctl restart kubelet.service
[root@master ~]# systemctl enable kubelet.service
[root@node1 ~]# dnf -y install kubeadm kubelet kubectl
[root@node1 ~]# systemctl restart kubelet.service
[root@node1 ~]# systemctl enable kubelet.service
[root@node2 ~]# dnf -y install kubeadm kubelet kubectl
[root@node2 ~]# systemctl restart kubelet.service
[root@node2 ~]# systemctl enable kubelet.service
To make sure that cluster initialization and joining the cluster succeed later on, containerd's configuration file /etc/containerd/config.toml needs to be set up. This must be done on all nodes.
[root@master ~]# containerd config default > /etc/containerd/config.toml
[root@node1 ~]# containerd config default > /etc/containerd/config.toml
[root@node2 ~]# containerd config default > /etc/containerd/config.toml
In /etc/containerd/config.toml, change the k8s image registry to registry.aliyuncs.com/google_containers,
then restart containerd and enable it to start at boot (see the sketch below).
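A concrete sketch of those two steps (assumptions: the generated default config contains a sandbox_image line pointing at registry.k8s.io/pause with tag 3.6; older containerd builds reference k8s.gcr.io instead, so adjust the pattern to match your file). Since the runtime is configured for the systemd cgroup driver above, flipping SystemdCgroup to true in the runc options is also commonly required:
[root@master ~]# sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"#' /etc/containerd/config.toml
[root@master ~]# sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml   # match the systemd cgroup driver
[root@master ~]# systemctl restart containerd
[root@master ~]# systemctl enable containerd
Repeat the same edits on node1 and node2.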
[root@master ~]# kubeadm init \
> --apiserver-advertise-address=192.168.78.130 \
> --image-repository registry.aliyuncs.com/google_containers \
> --kubernetes-version v1.25.4 \
> --service-cidr=10.96.0.0/12 \
> --pod-network-cidr=10.244.0.0/16
// It is recommended to save the kubeadm init output to a file, since it contains the join command needed by the worker nodes.
Configure the environment variable:
[root@master ~]# vim /etc/profile.d/k8s.sh
export KUBECONFIG=/etc/kubernetes/admin.conf
[root@master ~]# source /etc/profile.d/k8s.sh
[root@master ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@master ~]# kubectl apply -f kube-flannel.yml
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
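Before joining the workers, you can optionally confirm that the flannel DaemonSet pod reaches the Running state (the kube-flannel namespace was created by the manifest above):
[root@master ~]# kubectl get pods -n kube-flannel   # the kube-flannel-ds pod should show STATUS Running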
[root@node1 ~]# kubeadm join 192.168.78.130:6443 --token z07fle.af29qpvmh8qegr2s \
> --discovery-token-ca-cert-hash sha256:d3bbbe5fbba074db89023f1a1dcf7af5c8c4c95c79dfe1962a183c0193782517
[root@node2 ~]# kubeadm join 192.168.78.130:6443 --token z07fle.af29qpvmh8qegr2s \
> --discovery-token-ca-cert-hash sha256:d3bbbe5fbba074db89023f1a1dcf7af5c8c4c95c79dfe1962a183c0193782517
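The bootstrap token printed by kubeadm init is only valid for 24 hours by default. If it has expired by the time a node joins, a fresh join command can be generated on the master:
[root@master ~]# kubeadm token create --print-join-command   # prints a complete 'kubeadm join ...' line with a new token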
[root@master ~]# kubectl get nodes
NAME                 STATUS   ROLES           AGE    VERSION
master.example.com   Ready    control-plane   15m    v1.25.4
node1.example.com    Ready    <none>          103s   v1.25.4
node2.example.com    Ready    <none>          100s   v1.25.4
[root@master ~]# kubectl create deployment nginx --image nginx
deployment.apps/nginx created
[root@master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-76d6c9b8c-bslt7 1/1 Running 0 57s
[root@master ~]# kubectl get pods -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP           NODE                NOMINATED NODE   READINESS GATES
nginx-76d6c9b8c-bslt7   1/1     Running   0          71s   10.244.2.2   node2.example.com   <none>           <none>
[root@master ~]# kubectl get services
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   18m
Test:
[root@master ~]# curl 10.244.2.2:80
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
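The pod IP 10.244.2.2 is only reachable from inside the cluster. To reach nginx from outside, one option (not part of the original walkthrough) is to expose the deployment as a NodePort service; Kubernetes assigns the port from the 30000-32767 range:
[root@master ~]# kubectl expose deployment nginx --port 80 --type NodePort
[root@master ~]# kubectl get service nginx   # note the assigned port in PORT(S), e.g. 80:3xxxx/TCP
[root@master ~]# curl 192.168.78.131:<nodeport>   # substitute the NodePort shown above; any node IP works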