本文演示在 CentOS 7.9 上使用 kubeadm + cri-dockerd 部署 Kubernetes v1.24 高可用集群,以下为完整操作步骤。
注:本次实验使用 CentOS 7.9。
参考
https://blog.csdn.net/wuxingge/article/details/122234384
参考
https://blog.csdn.net/wuxingge/article/details/119462915
# Install prerequisites needed to manage yum repos and download files.
yum install -y yum-utils device-mapper-persistent-data lvm2 wget
# Add the upstream Docker CE repo, then point it at the Tsinghua mirror
# for faster downloads inside mainland China.
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum install docker-ce -y
mkdir -p /etc/docker
vim /etc/docker/daemon.json
{
"registry-mirrors": ["https://q2gr04ke.mirror.aliyuncs.com"],
"exec-opts": ["native.cgroupdriver=systemd"]
}
配置完成后启动 docker 并设置开机自启:
systemctl enable --now docker
项目地址
https://github.com/Mirantis/cri-dockerd
下载rpm包(从 https://github.com/Mirantis/cri-dockerd/releases 获取)并安装:
rpm -ivh cri-dockerd-0.2.2.20220610195206.0737013-0.el7.x86_64.rpm
启动文件修改
vim /usr/lib/systemd/system/cri-docker.service
修改
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
完整配置文件
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target

修改完成后重新加载 systemd 并启动 cri-docker:
systemctl daemon-reload
systemctl enable cri-docker.service
systemctl enable --now cri-docker.socket
# Configure the Kubernetes yum repository (Aliyun mirror).
# NOTE(review): gpgcheck=0 disables signature verification even though a
# gpgkey URL is listed; set gpgcheck=1 if package authenticity matters.
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=kubernetes repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
enabled=1
EOF
# Install kubelet/kubeadm/kubectl pinned to 1.24.0 (must match the target
# cluster version below).
yum install kubelet-1.24.0 kubeadm-1.24.0 kubectl-1.24.0 -y
systemctl enable kubelet
# Point kubelet at the cri-dockerd socket. Use the unix:// scheme: a bare
# path is deprecated, and this keeps the value consistent with the
# --cri-socket unix:///var/run/cri-dockerd.sock used by kubeadm below.
# NOTE(review): KUBELET_KUBEADM_ARGS is normally written by kubeadm to
# /var/lib/kubelet/kubeadm-flags.env; defining it here overrides that file.
vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS=
KUBELET_KUBEADM_ARGS="--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-dockerd.sock"
参考
https://blog.csdn.net/wuxingge/article/details/119462915
在代理节点 192.168.41.10 上配置 nginx 四层(stream)负载均衡,代理三台 master 的 6443 端口:
stream {
upstream kube-apiserver {
server 192.168.41.45:6443 max_fails=3 fail_timeout=30s;
server 192.168.41.46:6443 max_fails=3 fail_timeout=30s;
server 192.168.41.48:6443 max_fails=3 fail_timeout=30s;
}
server {
listen 6443;
proxy_connect_timeout 2s;
proxy_timeout 900s;
proxy_pass kube-apiserver;
}
}
# Initialize the first control-plane node.
#   --control-plane-endpoint : the nginx load-balancer address (192.168.41.10:6443)
#   --image-repository       : pull control-plane images from the Aliyun mirror
#   --cri-socket             : use cri-dockerd instead of containerd
#   --upload-certs           : upload control-plane certs so other masters can join
kubeadm init --control-plane-endpoint 192.168.41.10:6443 --kubernetes-version=v1.24.0 --image-repository=registry.aliyuncs.com/google_containers --pod-network-cidr=172.90.0.0/16 --service-cidr=10.96.0.0/12 --cri-socket unix:///var/run/cri-dockerd.sock --upload-certs
初始化过程
...
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 192.168.41.10:6443 --token r4iexy.l0tpt3b80xv22434 \
--discovery-token-ca-cert-hash sha256:a5715480b51321ebd720c397bbd9d0b65dd676c791ef21b1dde13576760b2ada \
--control-plane --certificate-key 028946f2af6983e602dbceddb3c99743eb14f868d3534f6d04963836863b99bb --cri-socket unix:///var/run/cri-dockerd.sock
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.41.10:6443 --token r4iexy.l0tpt3b80xv22434 \
--discovery-token-ca-cert-hash sha256:a5715480b51321ebd720c397bbd9d0b65dd676c791ef21b1dde13576760b2ada --cri-socket unix:///var/run/cri-dockerd.sock
...
也可以使用配置文件
vim kubeadm-config.yaml
# kubeadm configuration for a v1.24.0 HA cluster using cri-dockerd.
# (Indentation restored — YAML is whitespace-significant and the original
# listing had lost all indentation.)
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.41.45
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock
  imagePullPolicy: IfNotPresent
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
    service-node-port-range: 1-65535
  # NOTE: removed "feature-gates: RemoveSelfLink=false" — that feature gate
  # graduated to GA and is locked to true in Kubernetes v1.24, so
  # kube-apiserver refuses to start when it is set to false.
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.41.10:6443
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.24.0
networking:
  dnsDomain: cluster.local
  podSubnet: 172.90.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# Run kube-proxy in IPVS mode.
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
# Match kubelet's cgroup driver to Docker's (native.cgroupdriver=systemd).
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
#列出镜像
kubeadm config images list --config kubeadm-config.yaml
#下载镜像
kubeadm config images pull --config kubeadm-config.yaml
#基于配置文件初始化k8s
kubeadm init --config kubeadm-config.yaml --upload-certs
# flannel 项目已从 coreos 迁移到 flannel-io 组织
wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
修改
net-conf.json: |
  {
    "Network": "172.90.0.0/16",
    "Backend": {
      "Type": "vxlan"
    }
  }
将默认的 "Network": "10.244.0.0/16" 修改为 172.90.0.0/16,与 kubeadm 配置中的 podSubnet 保持一致
kubectl -n kube-system edit configmaps kube-proxy
修改
mode: "ipvs"
#重启kube-proxy
kubectl rollout restart daemonset kube-proxy -n kube-system
vim /etc/kubernetes/manifests/kube-apiserver.yaml
- --service-node-port-range=1-65535
修改完kube-apiserver会自动重启
vim /var/lib/kubelet/config.yaml
#最后添加
maxPods: 250
重启kubelet
systemctl restart kubelet