| Host   | IP             |
| ------ | -------------- |
| master | 192.168.159.10 |
| node01 | 192.168.159.13 |
| node02 | 192.168.159.11 |
This lab installs Kubernetes with kubeadm, using one master and two worker nodes. Unless noted otherwise, the preparation steps below are run on all three nodes.
[root@zwb_master ~]# systemctl stop firewalld      ## stop the firewall
[root@zwb_master ~]# systemctl disable firewalld   ## keep it from starting at boot
[root@zwb_master ~]# setenforce 0                  ## put SELinux in permissive mode
setenforce: SELinux is disabled
[root@zwb_master ~]# swapoff -a                    ## turn off swap (kubelet refuses to run with swap on)
[root@zwb_master ~]# free -g                       ## verify swap now shows zero
              total        used        free      shared  buff/cache   available
Mem:              3           0           2           0           0           3
Swap:             0           0           0
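Note that `setenforce 0` and `swapoff -a` only hold until the next reboot. A common way to make both permanent (standard CentOS 7 file locations assumed):

sed -ri 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config   ## disable SELinux at boot
sed -ri 's/.*swap.*/#&/' /etc/fstab                                    ## comment out the swap mount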
[root@zwb_node02 ~]# vim /etc/hosts ## add the same three entries on every node
......................................
192.168.159.10 master
192.168.159.11 node02
192.168.159.13 node01
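Each machine must also carry the matching hostname itself; judging from the node names in the `kubectl get nodes` output later, they were set roughly like this (run the matching line on each host):

hostnamectl set-hostname master   ## on 192.168.159.10
hostnamectl set-hostname node01   ## on 192.168.159.13
hostnamectl set-hostname node02   ## on 192.168.159.11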
[root@zwb_master ~]# cat > /etc/sysctl.d/k8s.conf << EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@zwb_master ~]# sysctl --system ## reload kernel parameters (do this on all three nodes)
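The two net.bridge.bridge-nf-call-* keys only exist while the br_netfilter kernel module is loaded, so if `sysctl --system` reports them as unknown, load the module first (standard CentOS 7 paths assumed):

modprobe br_netfilter                              ## load the module now
echo br_netfilter > /etc/modules-load.d/k8s.conf   ## reload it automatically at boot
sysctl --system                                    ## re-apply the two keys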
[root@master yum.repos.d]# ntpdate ntp1.aliyun.com ## sync the clock (all three nodes)
 4 Nov 16:53:27 ntpdate[10411]: adjust time server 120.25.115.20 offset -0.002465 sec
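ntpdate performs a one-shot sync, so the clocks drift apart again over time. One simple option (the 30-minute schedule is just an example) is a root cron entry:

echo '*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com' >> /var/spool/cron/root   ## periodic re-sync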
[root@zwb_master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 ## install Docker's dependencies
[root@zwb_master ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo ## add the Aliyun Docker CE mirror repo
[root@zwb_master ~]# yum install -y docker-ce ## install Docker CE (community edition)
[root@zwb_master ~]# systemctl enable docker.service --now ## enable Docker at boot and start it immediately
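Docker CE defaults to the cgroupfs cgroup driver, while kubeadm v1.21 configures the kubelet for systemd; the mismatch is a classic cause of kubelet crash-loops. A minimal /etc/docker/daemon.json to align the two (a sketch; apply on all three nodes before kubeadm runs):

cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker
docker info | grep -i 'cgroup driver'   ## should now print: Cgroup Driver: systemd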
[root@zwb_master ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
> https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@zwb_master ~]# yum install -y kubelet-1.21.3 kubeadm-1.21.3 kubectl-1.21.3 ## pin all three components to v1.21.3 (all three nodes)
[root@zwb_master ~]# systemctl enable kubelet.service --now ## enable at boot and start now (it crash-loops until kubeadm init runs; that is expected)
[root@zwb_master ~]# kubeadm init \
    --apiserver-advertise-address=192.168.159.10 \
    --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
    --kubernetes-version v1.21.3 \
    --service-cidr=10.125.0.0/16 \
    --pod-network-cidr=10.150.0.0/16   ## must match the Network value in kube-flannel.yml below
[root@zwb_master ~]# mkdir -p $HOME/.kube ## set up kubectl access for the current user
[root@zwb_master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@zwb_master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
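Alternatively, for a root-only session, kubectl can point straight at the admin kubeconfig instead of copying it:

export KUBECONFIG=/etc/kubernetes/admin.conf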
# Copy and save the join command printed by kubeadm init (run it on both worker nodes):
kubeadm join 192.168.159.10:6443 --token z78qqi.94i3znnuu0sundzr \
--discovery-token-ca-cert-hash sha256:4521a93aefff86da70790811841cb25885bb7ed4e0b338ed0cc194f5b6127129
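The bootstrap token in that command expires after 24 hours by default; if the workers join later than that, print a fresh join command on the master:

kubeadm token create --print-join-command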
## Upload kube-flannel.yml to the master
File contents:
---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.150.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
        #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        #image: flannelcni/flannel:v0.19.2 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.2
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
[root@zwb_master ~]# cd /opt
[root@zwb_master opt]# ls
kube-flannel.yml
[root@zwb_master opt]# kubectl apply -f kube-flannel.yml
### List pods across all namespaces
[root@zwb_master opt]# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel kube-flannel-ds-7clld 1/1 Running 0 60s
kube-flannel kube-flannel-ds-psgvb 1/1 Running 0 60s
kube-flannel kube-flannel-ds-xxncr 1/1 Running 0 60s
kube-system coredns-6f6b8cc4f6-lbvl5 1/1 Running 0 10m
kube-system coredns-6f6b8cc4f6-m6brz 1/1 Running 0 10m
kube-system etcd-master 1/1 Running 0 10m
kube-system kube-apiserver-master 1/1 Running 0 10m
kube-system kube-controller-manager-master 1/1 Running 0 10m
kube-system kube-proxy-jwpnz 1/1 Running 0 6m2s
kube-system kube-proxy-xqcqm 1/1 Running 0 6m7s
kube-system kube-proxy-z6rhl 1/1 Running 0 10m
kube-system kube-scheduler-master 1/1 Running 0 10m
### List pods in a single namespace
[root@zwb_master opt]# kubectl get pods -n kube-flannel
NAME READY STATUS RESTARTS AGE
kube-flannel-ds-7clld 1/1 Running 0 6m6s
kube-flannel-ds-psgvb 1/1 Running 0 6m6s
kube-flannel-ds-xxncr 1/1 Running 0 6m6s
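Each flannel pod corresponds to one node. On any node, the install-cni init container should by now have copied the CNI config from the ConfigMap above into place; a quick check:

ls /etc/cni/net.d/   ## expect: 10-flannel.conflist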
### Check component status: two components report Unhealthy
[root@zwb_master opt]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Unhealthy Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused
controller-manager Unhealthy Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused
etcd-0 Healthy {"health":"true"}

## Fix the configuration
[root@zwb_master ~]# cd /etc/kubernetes/manifests/
[root@zwb_master manifests]# ls
etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml kube-scheduler.yaml
[root@zwb_master manifests]# vim kube-controller-manager.yaml
Comment out the `- --port=0` line (line 26 here). Setting the port to 0 is what disables the insecure health endpoint that `kubectl get cs` probes on 127.0.0.1:10252.
[root@zwb_master manifests]# vim kube-scheduler.yaml
Same change here: comment out the `- --port=0` line (for the scheduler this restores the endpoint on 127.0.0.1:10251), as shown in the snippet below.
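After the edit, the command section of each manifest looks roughly like this (shown for kube-scheduler.yaml; the neighboring flags vary by file and version):

spec:
  containers:
  - command:
    - kube-scheduler
    - --bind-address=127.0.0.1
    - --kubeconfig=/etc/kubernetes/scheduler.conf
    - --leader-elect=true
    # - --port=0

The kubelet watches /etc/kubernetes/manifests/ and recreates the static pods on its own after the file is saved; no manual restart is needed.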
## Check again: all components are now healthy
[root@zwb_master manifests]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
[root@zwb_master manifests]# kubectl get pods -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel kube-flannel-ds-7clld 1/1 Running 0 26m
kube-flannel kube-flannel-ds-psgvb 1/1 Running 0 26m
kube-flannel kube-flannel-ds-xxncr 1/1 Running 0 26m
kube-system coredns-6f6b8cc4f6-lbvl5 1/1 Running 0 36m
kube-system coredns-6f6b8cc4f6-m6brz 1/1 Running 0 36m
kube-system etcd-master 1/1 Running 0 36m
kube-system kube-apiserver-master 1/1 Running 0 36m
kube-system kube-controller-manager-master 1/1 Running 0 2m53s
kube-system kube-proxy-jwpnz 1/1 Running 0 31m
kube-system kube-proxy-xqcqm 1/1 Running 0 31m
kube-system kube-proxy-z6rhl 1/1 Running 0 36m
kube-system kube-scheduler-master 1/1 Running 0 2m5s
[root@zwb_master manifests]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane,master 44m v1.21.3
node01 Ready <none> 39m v1.21.3
node02 Ready <none> 39m v1.21.3
### Label the worker nodes
[root@zwb_master manifests]# kubectl label node node01 node-role.kubernetes.io/node=node
node/node01 labeled
[root@zwb_master manifests]# kubectl label node node02 node-role.kubernetes.io/node=node
node/node02 labeled
[root@zwb_master manifests]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane,master 47m v1.21.3
node01 Ready node 42m v1.21.3
node02 Ready node 42m v1.21.3
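As a final sanity check (a minimal sketch; the deployment name and image are arbitrary), a throwaway workload confirms that scheduling and the flannel pod network work end to end:

kubectl create deployment nginx-test --image=nginx --replicas=2   ## hypothetical test deployment
kubectl get pods -o wide   ## both pods should be Running on node01/node02 with 10.150.x.x addresses
kubectl delete deployment nginx-test   ## clean up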