OS | Specs | Hostname | IP |
---|---|---|---|
CentOS 7.9 | 2C4G | k8s-master | 192.168.93.101 |
CentOS 7.9 | 2C4G | k8s-node01 | 192.168.93.102 |
CentOS 7.9 | 2C4G | k8s-node02 | 192.168.93.103 |
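# Disable the firewall and SELinux permanently on all three nodes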
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i "s/^SELINUX=.*/SELINUX=disabled/g" /etc/selinux/config
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
[root@k8s-master ~]# cat >> /etc/hosts << EOF
192.168.93.101 k8s-master
192.168.93.102 k8s-node01
192.168.93.103 k8s-node02
EOF
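# (Optional) The worker nodes need the same hosts entries; a sketch, assuming root SSH access to both nodes:
scp /etc/hosts root@192.168.93.102:/etc/hosts
scp /etc/hosts root@192.168.93.103:/etc/hosts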
[root@k8s-master ~]# yum -y install vim wget net-tools lrzsz unzip
[root@k8s-master ~]# swapoff -a
[root@k8s-master ~]# sed -i '/swap/s/^/#/' /etc/fstab
[root@k8s-master ~]# tail -1 /etc/fstab
#/dev/mapper/centos-swap swap swap defaults 0 0
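# (Optional) Confirm that swap is fully disabled:
swapon -s
free -h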
[root@k8s-node01 ~]# yum -y install ntpdate && ntpdate ntp.aliyun.com
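# (Optional) Time must stay in sync on every node; a minimal sketch that re-syncs every 30 minutes via cron:
(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate ntp.aliyun.com >/dev/null 2>&1") | crontab -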
[root@k8s-master ~]# yum -y install yum-utils device-mapper-persistent-data lvm2
[root@k8s-master ~]# yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
[root@k8s-master ~]# yum makecache fast
# Install a specific Docker version (Kubernetes is strict about which Docker versions it supports)
[root@k8s-master ~]# yum -y install docker-ce-19.03.15 docker-ce-cli-19.03.15
# Enable Docker at boot and start it immediately
[root@k8s-master ~]# systemctl enable docker --now
# Configure the registry mirror; the following sets the systemd cgroup driver and an Aliyun accelerator address
[root@k8s-master ~]# vim /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": ["https://u9noolvn.mirror.aliyuncs.com"]
}
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl restart docker
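# (Optional) Confirm that Docker now reports the systemd cgroup driver:
docker info 2>/dev/null | grep -i "cgroup driver"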
# Configure kernel parameters
[root@k8s-master ~]# cat >> /etc/sysctl.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Load the br_netfilter module into the kernel
[root@k8s-master ~]# modprobe br_netfilter
[root@k8s-master ~]# sysctl -p
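# (Optional) br_netfilter may not be loaded automatically after a reboot; a sketch to make it persistent:
cat > /etc/modules-load.d/br_netfilter.conf << EOF
br_netfilter
EOF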
[root@k8s-master ~]# cat >> /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg \
https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
[root@k8s-master ~]# yum makecache fast
# List the available Kubernetes versions
[root@k8s-master ~]# yum list kubectl --showduplicates | sort -r
# Install the specified version
[root@k8s-master ~]# yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
# Enable kubelet at boot (do not start the service yet)
[root@k8s-master ~]# systemctl enable kubelet.service
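# (Optional) Confirm the installed versions match on every node:
kubeadm version -o short
kubelet --version
kubectl version --client --short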
# Generate the default initialization configuration file
[root@k8s-master ~]# kubeadm config print init-defaults > init-config.yaml
W0620 08:25:39.895580 9287 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[root@k8s-master ~]# vim init-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.93.101 # IP address of the master node
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master # If a hostname is used here, make sure it resolves; an IP address can be used instead
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers # Changed to the Aliyun mirror repository
kind: ClusterConfiguration
kubernetesVersion: v1.18.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16 # The Pod subnet must not overlap with the hosts' physical network segment
scheduler: {}
# List the required images
[root@k8s-master ~]# kubeadm config images list --config init-config.yaml
W0620 08:28:04.815219 9306 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
registry.aliyuncs.com/google_containers/kube-apiserver:v1.18.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.18.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.18.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.18.0
registry.aliyuncs.com/google_containers/pause:3.2
registry.aliyuncs.com/google_containers/etcd:3.4.3-0
registry.aliyuncs.com/google_containers/coredns:1.6.7
# Pull the required images
[root@k8s-master ~]# kubeadm config images pull --config=init-config.yaml
W0620 08:28:38.237777 9312 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.18.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.18.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.18.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.18.0
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.2
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.4.3-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:1.6.7
[root@k8s-master ~]# kubeadm init --config=init-config.yaml
W0620 08:30:29.869197 9486 configset.go:202] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[init] Using Kubernetes version: v1.18.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.93.101]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.93.101 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.93.101 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0620 08:30:32.099248 9486 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0620 08:30:32.100054 9486 manifests.go:225] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 15.002164 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.18" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
####################################################################
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
####################################################################
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
####################################################################
kubeadm join 192.168.93.101:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:9999ca93cd68cfa53f3da5773752e3faf182faa3ee5b429810c461b6f18ab742
####################################################################
# Run on the master node
[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# chown $(id -u):$(id -g) $HOME/.kube/config
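# (Optional) Verify that kubectl can reach the new control plane:
kubectl cluster-info
kubectl get cs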
[root@k8s-node01 ~]# kubeadm join 192.168.93.101:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:9999ca93cd68cfa53f3da5773752e3faf182faa3ee5b429810c461b6f18ab742
[root@k8s-node02 ~]# kubeadm join 192.168.93.101:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:9999ca93cd68cfa53f3da5773752e3faf182faa3ee5b429810c461b6f18ab742
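# (Optional) The default bootstrap token expires after 24 hours; a fresh join command can be printed on the master at any time:
kubeadm token create --print-join-command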
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master NotReady master 3m11s v1.18.0
k8s-node01 NotReady <none> 59s v1.18.0
k8s-node02 NotReady <none> 38s v1.18.0
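# The calico.yaml manifest is assumed to already be in the current directory; it can be downloaded beforehand, for example:
wget https://docs.projectcalico.org/manifests/calico.yaml
# Depending on the Calico version, the CALICO_IPV4POOL_CIDR value in the manifest may need to match podSubnet (10.244.0.0/16)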
[root@k8s-master ~]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
poddisruptionbudget.policy/calico-kube-controllers created
# Check the Pods in all namespaces (all of them must reach the Running state)
[root@k8s-master ~]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system calico-kube-controllers-858fbfbc9-vrlxn 0/1 Running 3 2m53s
kube-system calico-node-qkvkz 1/1 Running 0 2m53s
kube-system calico-node-x54lm 1/1 Running 0 2m53s
kube-system calico-node-zbs6c 1/1 Running 0 2m53s
kube-system coredns-7ff77c879f-8pl9d 0/1 Running 0 11m
kube-system coredns-7ff77c879f-lmdk8 0/1 Running 0 11m
kube-system etcd-k8s-master 1/1 Running 0 11m
kube-system kube-apiserver-k8s-master 1/1 Running 0 11m
kube-system kube-controller-manager-k8s-master 1/1 Running 0 11m
kube-system kube-proxy-758cc 1/1 Running 0 9m47s
kube-system kube-proxy-flvgv 1/1 Running 0 9m26s
kube-system kube-proxy-nwg7d 1/1 Running 0 11m
kube-system kube-scheduler-k8s-master 1/1 Running 0 11m
# Check the status of the cluster nodes
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 12m v1.18.0
k8s-node01 Ready <none> 10m v1.18.0
k8s-node02 Ready <none> 10m v1.18.0
# Create a namespace
[root@k8s-master ~]# kubectl create ns policy-demo
namespace/policy-demo created
# Create an Nginx Pod with two replicas in the policy-demo namespace
[root@k8s-master ~]# kubectl run --namespace=policy-demo nginx --replicas=2 --image=nginx
Flag --replicas has been deprecated, has no effect and will be removed in the future.
pod/nginx created
# If the warning "Flag --replicas has been deprecated, has no effect and will be removed in the future." appears, kubectl is v1.18.0 or later: in these versions --replicas is deprecated for `kubectl run` and is ignored, so only a single Pod is created; a Deployment is recommended for running multiple replicas
# Delete the Pod that was just created
[root@k8s-master ~]# kubectl delete pod nginx -n policy-demo
pod "nginx" deleted
[root@k8s-master ~]# vim nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: policy-demo
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
[root@k8s-master ~]# kubectl apply -f nginx-deployment.yaml
deployment.apps/nginx created
# Expose Nginx port 80 through a Service
[root@k8s-master ~]# kubectl expose --namespace=policy-demo deployment nginx --port=80
service/nginx exposed
# List all resources in the policy-demo namespace
[root@k8s-master ~]# kubectl get all -n policy-demo
NAME READY STATUS RESTARTS AGE
pod/nginx-d46f5678b-8c9br 1/1 Running 0 76s
pod/nginx-d46f5678b-rwkrr 1/1 Running 0 76s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx ClusterIP 10.102.145.27 <none> 80/TCP 45s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx 2/2 2 2 76s
NAME DESIRED CURRENT READY AGE
replicaset.apps/nginx-d46f5678b 2 2 2 76s
# Access the Nginx Service from a busybox Pod
[root@k8s-master ~]# kubectl run --namespace=policy-demo access --rm -it --image busybox /bin/sh
If you don't see a command prompt, try pressing enter.
/ # wget -q nginx -O -
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@k8s-master ~]# kubectl create -f - << EOF
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: default-deny
  namespace: policy-demo
spec:
  podSelector:
    matchLabels: {}
EOF
networkpolicy.networking.k8s.io/default-deny created
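# (Optional) Confirm the policy exists; an empty podSelector matches every Pod in the namespace, so all ingress traffic is now denied by default:
kubectl get networkpolicy -n policy-demo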
[root@k8s-master ~]# kubectl run --namespace=policy-demo access --rm -it --image busybox /bin/sh
If you don't see a command prompt, try pressing enter.
/ # wget -q --timeout=5 nginx -O -
wget: download timed out # connection timed out
[root@k8s-master ~]# kubectl create -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: access-nginx
  namespace: policy-demo
spec:
  podSelector:
    matchLabels:
      app: nginx
  ingress:
  - from:
    - podSelector:
        matchLabels:
          run: access
EOF
networkpolicy.networking.k8s.io/access-nginx created
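# (Optional) Inspect the rule that now allows traffic from Pods labeled run=access:
kubectl describe networkpolicy access-nginx -n policy-demo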
# Access the Service from the access Pod
[root@k8s-master ~]# kubectl run --namespace=policy-demo access --rm -it --image busybox /bin/sh
If you don't see a command prompt, try pressing enter.
/ # wget -q --timeout=5 nginx -O -
(default "Welcome to nginx!" page returned)
# A Pod without the label run: access still cannot access the Service
[root@k8s-master ~]# kubectl run --namespace=policy-demo cant-access --rm -ti --image busybox /bin/sh
If you don't see a command prompt, try pressing enter.
/ # wget -q --timeout=5 nginx -O -
wget: download timed out
[root@k8s-master ~]# kubectl delete ns policy-demo
namespace "policy-demo" deleted
[root@k8s-master ~]# kubectl create ns advanced-policy-demo
namespace/advanced-policy-demo created
# Create the Nginx service from a YAML file
[root@k8s-master ~]# vim nginx-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
  namespace: advanced-policy-demo
  labels:
    app: nginx
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        ports:
        - containerPort: 80
[root@k8s-master ~]# kubectl apply -f nginx-deployment.yaml
deployment.apps/nginx created
[root@k8s-master ~]# kubectl expose --namespace=advanced-policy-demo deployment nginx --port=80
service/nginx exposed
# Verify access to the Service, then test outbound connectivity by fetching www.baidu.com
[root@k8s-master ~]# kubectl run --namespace=advanced-policy-demo access --rm -it --image busybox /bin/sh
If you don't see a command prompt, try pressing enter.
/ # wget -q --timeout=5 nginx -O -
(default "Welcome to nginx!" page returned)
/ # wget -q --timeout=5 www.baidu.com -O -
百度一下,你就知道