master: 192.168.10.138
node1: 192.168.10.139
node2: 192.168.10.140
Update the host name mappings; this must be done on all machines
vim /etc/hosts
192.168.10.138 master
192.168.10.139 node1
192.168.10.140 node2
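A quick optional check (not part of the original steps, assuming the three entries above) to confirm each name resolves from /etc/hosts:
# should print one line per host with the addresses above
for h in master node1 node2; do getent hosts $h; done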
【master】
ssh master # type yes, then the account password
ssh-keygen -t rsa # keep pressing Enter through the prompts
ssh-copy-id master # type yes, then the account password
ssh-copy-id node1
ssh-copy-id node2
【node1】
ssh node1 # type yes, then the account password
ssh-keygen -t rsa # keep pressing Enter through the prompts
ssh-copy-id master # type yes, then the account password
ssh-copy-id node1
ssh-copy-id node2
【node2】
ssh node2 # type yes, then the account password
ssh-keygen -t rsa # keep pressing Enter through the prompts
ssh-copy-id master # type yes, then the account password
ssh-copy-id node1
ssh-copy-id node2
It is best to test SSH between every pair of hosts to make sure there are no problems
[root@master ~]# ssh node1
[root@master ~]# ssh node2
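An optional batch check (assuming the key exchange above completed on every host): BatchMode makes ssh fail instead of prompting if any key copy was missed.
# run on each machine; every line should print the remote hostname without asking for a password
for h in master node1 node2; do ssh -o BatchMode=yes $h hostname; done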
On the master, configure the yum repositories:
vim /etc/yum.repos.d/rhel.repo
[BaseOS]
name=BaseOS
baseurl=file:///mnt/BaseOS
gpgcheck=0
[AppStream]
name=AppStream
baseurl=file:///mnt/AppStream
gpgcheck=0
vim /etc/yum.repos.d/k8s.repo
[k8s]
name=k8s
baseurl=http://mirrors.ustc.edu.cn/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
Copy the repo files to node1 and node2:
scp /etc/yum.repos.d/rhel.repo root@node1:/etc/yum.repos.d/
scp /etc/yum.repos.d/rhel.repo root@node2:/etc/yum.repos.d/
scp /etc/yum.repos.d/k8s.repo root@node1:/etc/yum.repos.d/
scp /etc/yum.repos.d/k8s.repo root@node2:/etc/yum.repos.d/
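Optional verification (this assumes the installation media is also mounted at /mnt on each node, as the BaseOS/AppStream baseurl requires):
# BaseOS, AppStream, and k8s should all appear in the output
ssh node1 "dnf repolist"
ssh node2 "dnf repolist"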
Do this on all machines: stop and disable the firewall
systemctl stop firewalld
systemctl disable firewalld
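Optional check that the firewall is really off and will stay off after a reboot:
systemctl is-active firewalld    # expect: inactive
systemctl is-enabled firewalld   # expect: disabled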
Do this on all machines: set SELinux to permissive
vim /etc/sysconfig/selinux
SELINUX=permissive
[root@master ~]# setenforce 0
[root@master ~]# getenforce
Permissive
Do this on all machines: enable time synchronization
systemctl restart chronyd
systemctl enable chronyd
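Optional check that chrony is actually syncing (assumes the servers in the default chrony.conf are reachable):
chronyc sources     # at least one source should be marked with ^*
chronyc tracking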
Do this on all machines: turn off swap
vim /etc/fstab
Comment out the swap line:
#/dev/mapper/rhel-swap none swap defaults 0 0
swapon -s
swapoff /dev/dm-1
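Optional check that swap is fully off; if your swap device is not /dev/dm-1, swapoff -a disables every swap area listed in /etc/fstab instead:
swapon --show    # should print nothing
free -m          # the Swap line should show 0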
Do this on all machines: set the kernel parameters required by Kubernetes
vim /etc/sysctl.d/kubernetes.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@master ~]# modprobe br_netfilter    # load the module first so the bridge sysctls exist
[root@master ~]# sysctl --system    # sysctl -p only reads /etc/sysctl.conf; --system also loads /etc/sysctl.d/*.conf
[root@master ~]# lsmod | grep br_netfilter
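Optional (an addition beyond the original steps): make br_netfilter load on every boot, then re-check that the values took effect.
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward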
Do this on all machines (on the node machines, change the IP address accordingly)
nmcli connection modify ens160 ipv4.method manual ipv4.addresses 192.168.10.138/24 ipv4.gateway 192.168.10.2 ipv4.dns 114.114.114.114
nmcli connection modify ens160 connection.autoconnect yes
nmcli connection up ens160
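Optional check that the static address actually applied (ens160 is the interface name used above; adjust if yours differs):
nmcli -g ipv4.addresses,ipv4.gateway,ipv4.dns connection show ens160
ip addr show ens160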
Do this on all machines: add the Docker repository
For Docker installation and deployment, see: https://blog.csdn.net/weixin_44508518/article/details/127126886
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
Do this on all machines: install the required packages
# If podman is already installed on the system: dnf remove podman -y
dnf install -y iproute-tc yum-utils device-mapper-persistent-data lvm2 kubelet-1.21.3 kubeadm-1.21.3 kubectl-1.21.3 docker-ce
systemctl enable kubelet
systemctl enable --now docker
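Optional check that the expected versions were installed:
kubeadm version
kubectl version --client
kubelet --version
docker --version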
Do this on all machines: configure the Docker daemon
vim /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://8zs3633v.mirror.aliyuncs.com"]
}
Restart Docker: systemctl restart docker
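Optional check that Docker picked up the systemd cgroup driver (it must match the kubelet, otherwise the kubelet will not start cleanly):
docker info | grep -i cgroup    # expect: Cgroup Driver: systemd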
Do this on all machines: list the images kubeadm needs
kubeadm config images list
Do this on all machines: pull the images from a mirror
docker pull kittod/kube-apiserver:v1.21.3
docker pull kittod/kube-controller-manager:v1.21.3
docker pull kittod/kube-scheduler:v1.21.3
docker pull kittod/kube-proxy:v1.21.3
docker pull kittod/pause:3.4.1
docker pull kittod/etcd:3.4.13-0
docker pull kittod/coredns:v1.8.0
docker pull kittod/flannel:v0.14.0
Do this on all machines: re-tag the images to the names kubeadm expects
docker tag kittod/kube-apiserver:v1.21.3 k8s.gcr.io/kube-apiserver:v1.21.3
docker tag kittod/kube-controller-manager:v1.21.3 k8s.gcr.io/kube-controller-manager:v1.21.3
docker tag kittod/kube-scheduler:v1.21.3 k8s.gcr.io/kube-scheduler:v1.21.3
docker tag kittod/kube-proxy:v1.21.3 k8s.gcr.io/kube-proxy:v1.21.3
docker tag kittod/pause:3.4.1 k8s.gcr.io/pause:3.4.1
docker tag kittod/etcd:3.4.13-0 k8s.gcr.io/etcd:3.4.13-0
docker tag kittod/coredns:v1.8.0 k8s.gcr.io/coredns/coredns:v1.8.0
docker tag kittod/flannel:v0.14.0 quay.io/coreos/flannel:v0.14.0
Tip: why delete the original tags? Because Kubernetes expects the images under the k8s.gcr.io names
Do this on all machines: remove the original tags
docker rmi kittod/kube-apiserver:v1.21.3
docker rmi kittod/kube-controller-manager:v1.21.3
docker rmi kittod/kube-scheduler:v1.21.3
docker rmi kittod/kube-proxy:v1.21.3
docker rmi kittod/pause:3.4.1
docker rmi kittod/etcd:3.4.13-0
docker rmi kittod/coredns:v1.8.0
docker rmi kittod/flannel:v0.14.0
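Optional check: removing the kittod tags does not delete the image layers, because the k8s.gcr.io / quay.io tags still reference them. Confirm the expected names remain:
docker images | grep -E 'k8s.gcr.io|quay.io/coreos/flannel'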
Do this on all machines: pull the nginx image
docker pull nginx
docker tag nginx:latest kittod/nginx:1.21.5
Run on the master; note that the IP address is the master host's address
kubeadm init \
--kubernetes-version=v1.21.3 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--apiserver-advertise-address=192.168.10.138
If initialization succeeds, the following output is shown; copy it somewhere for later use
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.10.138:6443 --token i01jev.0zb0ksde9q4dntv7 \
--discovery-token-ca-cert-hash sha256:06247ddf935889efb7b4b9ee85d8c3a639b7160b8c61849aa75244aad391ce49
Create the kubeconfig directory
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
Export the environment variable
export KUBECONFIG=/etc/kubernetes/admin.conf
If initialization fails, clean up as follows and retry
systemctl stop kubelet
rm -rf /etc/kubernetes/*
systemctl stop docker
If it fails to stop, reboot
docker container prune
docker ps -a
If no containers are listed, the cleanup is complete
rm -rf /var/lib/kubelet/
rm -rf /var/lib/etcd
Run on node1 and node2 as root to join the cluster:
kubeadm join 192.168.10.138:6443 --token i01jev.0zb0ksde9q4dntv7 \
--discovery-token-ca-cert-hash sha256:06247ddf935889efb7b4b9ee85d8c3a639b7160b8c61849aa75244aad391ce49
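Optional check after the join: back on the master, the new node should appear (it may stay NotReady until the flannel network is deployed):
kubectl get nodes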
If joining fails:
1.
kubeadm reset -y
2.
rm -rf /etc/kubernetes/kubelet.conf
rm -rf /etc/kubernetes/pki/ca.crt
systemctl restart kubelet
If you forgot the join command printed after master initialization, run this on the master to regenerate it: kubeadm token create --print-join-command
Run on the master (kubectl uses the admin kubeconfig set up above): deploy the flannel network and check the cluster
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl get nodes
If the status shows Ready, the cluster has been set up successfully
kubectl get pod -n kube-system
If the status shows Running, the services are healthy
journalctl -f -u kubelet
If a node's status is NotReady, check its kubelet logs (command above); the most common cause is a failed image pull
Run on the node(s):
docker pull kittod/pause:3.4.1
docker tag kittod/pause:3.4.1 k8s.gcr.io/pause:3.4.1
docker pull kittod/kube-proxy:v1.21.3
docker tag kittod/kube-proxy:v1.21.3 k8s.gcr.io/kube-proxy:v1.21.3
reboot
echo "source <(kubectl completion bash)">> /root/.bashrc
source /root/.bashrc
Deploy an nginx test service (run on the master):
docker pull nginx
docker tag nginx:latest kittod/nginx:1.21.5
kubectl create deployment nginx --image=kittod/nginx:1.21.5
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pods,service
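The NodePort (30365 in this run) is assigned randomly from the 30000-32767 range; if yours differs, look it up before running the commands below, for example with a jsonpath query:
kubectl get service nginx -o jsonpath='{.spec.ports[0].nodePort}'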
netstat -lntup | grep 30365
[root@master ~]# curl 192.168.10.138:30365
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>