# Create the Pod
[root@k8s-master01 pod]# kubectl apply -f pod-base.yaml
pod/pod-base created

# Check the Pod status
# READY 1/2  : the Pod contains 2 containers, 1 of which is ready and 1 is not
# RESTARTS   : restart count; one container keeps failing, so the Pod keeps restarting it to try to recover
[root@k8s-master01 pod]# kubectl get pod -n dev
NAME       READY   STATUS    RESTARTS   AGE
pod-base   1/2     Running   4          95s

# Use describe to inspect the details
# At this point a basic Pod is up and running, even though it still has a problem
[root@k8s-master01 pod]# kubectl describe pod pod-base -n dev
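(The pod-base.yaml manifest itself is not reproduced in this excerpt. A minimal sketch consistent with the behaviour above — two containers, with busybox running no long-lived command and therefore exiting repeatedly — could look like this; the exact content is an assumption.)

apiVersion: v1
kind: Pod
metadata:
  name: pod-base
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  - name: busybox        # no long-running process is defined, so this container exits and keeps being restarted
    image: busybox:1.30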
1.2.2 Image Pull
Create a file named pod-imagepullpolicy.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-imagepullpolicy
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    imagePullPolicy: Always # image pull policy
  - name: busybox
    image: busybox:1.30
# Create the Pod
[root@k8s-master01 pod]# kubectl create -f pod-imagepullpolicy.yaml
pod/pod-imagepullpolicy created
# View the Pod details
# Notice that the nginx image goes through a Pulling image "nginx:1.17.1" step
[root@k8s-master01 pod]# kubectl describe pod pod-imagepullpolicy -n dev
......
Events:
  Type    Reason     Age               From               Message
  ----    ------     ----              ----               -------
  Normal  Scheduled  >                 default-scheduler  Successfully assigned dev/pod-imagepullpolicy to node1
  Normal  Pulling    32s               kubelet, node1     Pulling image "nginx:1.17.1"
  Normal  Pulled     26s               kubelet, node1     Successfully pulled image "nginx:1.17.1"
  Normal  Created    26s               kubelet, node1     Created container nginx
  Normal  Started    25s               kubelet, node1     Started container nginx
  Normal  Pulled     7s (x3 over 25s)  kubelet, node1     Container image "busybox:1.30" already present on machine
  Normal  Created    7s (x3 over 25s)  kubelet, node1     Created container busybox
  Normal  Started    7s (x3 over 25s)  kubelet, node1     Started container busybox
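If imagePullPolicy is omitted, Kubernetes derives a default from the image tag: Always when the tag is :latest (or missing), otherwise IfNotPresent. To confirm which policy actually ended up on a container, a quick check along these lines can be used (shown as an illustrative sketch):

# Print the effective pull policy of the first container in the pod
[root@k8s-master01 pod]# kubectl get pod pod-imagepullpolicy -n dev -o jsonpath='{.spec.containers[0].imagePullPolicy}'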
Create a file named pod-resources.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-resources
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    resources:         # resource quota
      limits:          # resource limits (upper bound)
        cpu: "2"       # CPU limit, in number of cores
        memory: "10Gi" # memory limit
      requests:        # resource requests (lower bound)
        cpu: "1"       # CPU request, in number of cores
        memory: "10Mi" # memory request
A note on the units used for cpu and memory:
cpu: number of cores, may be an integer or a decimal
memory: amount of memory, expressed in forms such as Gi, Mi, G, M
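For example, fractional CPU values can equivalently be written in millicores, and memory can use either binary (Gi/Mi) or decimal (G/M) suffixes; the numbers below are purely illustrative:

resources:
  requests:
    cpu: "0.5"      # half a core, the same as writing "500m" (millicores)
    memory: "100Mi" # 100 mebibytes; "100M" would instead mean 100 megabytes (decimal)
  limits:
    cpu: "1500m"    # 1.5 cores expressed in millicores
    memory: "1Gi"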
# Run the Pod
[root@k8s-master01 ~]# kubectl create -f pod-resources.yaml
pod/pod-resources created

# The pod runs normally
[root@k8s-master01 ~]# kubectl get pod pod-resources -n dev
NAME            READY   STATUS    RESTARTS   AGE
pod-resources   1/1     Running   0          39s

# Next, stop (delete) the Pod
[root@k8s-master01 ~]# kubectl delete -f pod-resources.yaml
pod "pod-resources" deleted

# Edit the manifest and change resources.requests.memory to 10Gi
[root@k8s-master01 ~]# vim pod-resources.yaml

# Start the pod again
[root@k8s-master01 ~]# kubectl create -f pod-resources.yaml
pod/pod-resources created

# Check the Pod status: this time the Pod fails to start
[root@k8s-master01 ~]# kubectl get pod pod-resources -n dev -o wide
NAME            READY   STATUS    RESTARTS   AGE
pod-resources   0/1     Pending   0          20s

# describe the pod and you will see the following hint
[root@k8s-master01 ~]# kubectl describe pod pod-resources -n dev
......
Warning  FailedScheduling  35s  default-scheduler  0/3 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master:}, that the pod didn't tolerate, 2 Insufficient memory. (the nodes do not have enough memory)
Method 1: Exec
Create pod-liveness-exec.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-exec
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      exec:
        command: ["/bin/cat","/tmp/hello11.txt"] # run a command that reads a file (this file does not exist)
Create the pod and observe the result:
# Create the Pod
[root@k8s-master01 ~]# kubectl create -f pod-liveness-exec.yaml
pod/pod-liveness-exec created

# View the Pod details
[root@k8s-master01 ~]# kubectl describe pods pod-liveness-exec -n dev
......
  Normal   Created    20s (x2 over 50s)  kubelet, node1  Created container nginx
  Normal   Started    20s (x2 over 50s)  kubelet, node1  Started container nginx
  Normal   Killing    20s                kubelet, node1  Container nginx failed liveness probe, will be restarted
  Warning  Unhealthy  0s (x5 over 40s)   kubelet, node1  Liveness probe failed: cat: can't open '/tmp/hello11.txt': No such file or directory
# As the events show, the nginx container is health-checked as soon as it starts
# When the check fails, the container is killed and then restarted (this is the restart policy at work, covered later)
# Wait a while and check the pod again: RESTARTS is no longer 0 and keeps growing
[root@k8s-master01 ~]# kubectl get pods pod-liveness-exec -n dev
NAME                READY   STATUS             RESTARTS   AGE
pod-liveness-exec   0/1     CrashLoopBackOff   2          3m19s

# Of course, if you change the command to read a file that actually exists, e.g. /tmp/hello.txt, and try again, everything works......
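Besides the probe action itself (exec, tcpSocket or httpGet), a livenessProbe also accepts timing fields that control how aggressively the kubelet checks. The values below are illustrative, not taken from the walkthrough above:

livenessProbe:
  exec:
    command: ["/bin/cat","/tmp/hello11.txt"]
  initialDelaySeconds: 30  # wait 30s after the container starts before the first probe
  timeoutSeconds: 1        # each probe attempt times out after 1s
  periodSeconds: 10        # run the probe every 10s
  failureThreshold: 3      # restart the container after 3 consecutive failures
  successThreshold: 1      # one success marks the container healthy again (must be 1 for liveness probes)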
Method 2: TCPSocket
Create pod-liveness-tcpsocket.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-tcpsocket
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      tcpSocket:
        port: 8080 # try to connect to port 8080
Create the pod and observe the result:
# Create the Pod
[root@k8s-master01 ~]# kubectl create -f pod-liveness-tcpsocket.yaml
pod/pod-liveness-tcpsocket created

# View the Pod details
[root@k8s-master01 ~]# kubectl describe pods pod-liveness-tcpsocket -n dev
......
  Normal   Scheduled  31s            default-scheduler  Successfully assigned dev/pod-liveness-tcpsocket to node2
  Normal   Pulled     >              kubelet, node2     Container image "nginx:1.17.1" already present on machine
  Normal   Created    >              kubelet, node2     Created container nginx
  Normal   Started    >              kubelet, node2     Started container nginx
  Warning  Unhealthy  > (x2 over >)  kubelet, node2     Liveness probe failed: dial tcp 10.244.2.44:8080: connect: connection refused
# As the events show, the probe tries to connect to port 8080 but fails
# Wait a while and check the pod again: RESTARTS is no longer 0 and keeps growing
[root@k8s-master01 ~]# kubectl get pods pod-liveness-tcpsocket -n dev
NAME                     READY   STATUS             RESTARTS   AGE
pod-liveness-tcpsocket   0/1     CrashLoopBackOff   2          3m19s

# Of course, if you change it to a port that can actually be reached, e.g. 80, and try again, everything works......
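Method 3: HTTPGet
The pod-liveness-httpget.yaml file used below is not reproduced in this excerpt. A minimal sketch consistent with the 404 failures shown in the events might look like the following (the probed path /hello is an assumption):

apiVersion: v1
kind: Pod
metadata:
  name: pod-liveness-httpget
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
    ports:
    - name: nginx-port
      containerPort: 80
    livenessProbe:
      httpGet:          # issue an HTTP GET against the container
        scheme: HTTP    # supported schemes: HTTP, HTTPS
        port: 80        # port to probe
        path: /hello    # a path nginx does not serve, so the probe fails with 404 (assumption)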
Create the pod and observe the result:
# Create the Pod
[root@k8s-master01 ~]# kubectl create -f pod-liveness-httpget.yaml
pod/pod-liveness-httpget created

# View the Pod details
[root@k8s-master01 ~]# kubectl describe pod pod-liveness-httpget -n dev
.......
  Normal   Pulled     6s (x3 over 64s)  kubelet, node1  Container image "nginx:1.17.1" already present on machine
  Normal   Created    6s (x3 over 64s)  kubelet, node1  Created container nginx
  Normal   Started    6s (x3 over 63s)  kubelet, node1  Started container nginx
  Warning  Unhealthy  6s (x6 over 56s)  kubelet, node1  Liveness probe failed: HTTP probe failed with statuscode: 404
  Normal   Killing    6s (x2 over 36s)  kubelet, node1  Container nginx failed liveness probe, will be restarted
# As the events show, the probe requests the configured path but it is not found, so a 404 error occurs
# Wait a while and check the pod again: RESTARTS is no longer 0 and keeps growing
[root@k8s-master01 ~]# kubectl get pod pod-liveness-httpget -n dev
NAME                   READY   STATUS    RESTARTS   AGE
pod-liveness-httpget   1/1     Running   5          3m17s

# Of course, if you change path to one that can be reached, e.g. /, and try again, everything works......
Create pod-nodename.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodename
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  nodeName: node1 # schedule the pod onto node1
# Create the Pod
[root@k8s-master01 ~]# kubectl create -f pod-nodename.yaml
pod/pod-nodename created

# Check the NODE column: the pod was indeed scheduled onto node1
[root@k8s-master01 ~]# kubectl get pods pod-nodename -n dev -o wide
NAME           READY   STATUS    RESTARTS   AGE   IP            NODE    ......
pod-nodename   1/1     Running   0          56s   10.244.1.87   node1   ......

# Next, delete the pod and change nodeName to node3 (there is no node3 in the cluster)
[root@k8s-master01 ~]# kubectl delete -f pod-nodename.yaml
pod "pod-nodename" deleted
[root@k8s-master01 ~]# vim pod-nodename.yaml
[root@k8s-master01 ~]# kubectl create -f pod-nodename.yaml
pod/pod-nodename created

# Check again: the pod has been assigned to node3, but since node3 does not exist the pod cannot run
[root@k8s-master01 ~]# kubectl get pods pod-nodename -n dev -o wide
NAME           READY   STATUS    RESTARTS   AGE   IP       NODE    ......
pod-nodename   0/1     Pending   0          6s    <none>   node3   ......
Create pod-nodeaffinity-preferred.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodeaffinity-preferred
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity:                                            # affinity settings
    nodeAffinity:                                      # node affinity
      preferredDuringSchedulingIgnoredDuringExecution: # soft requirement
      - weight: 1
        preference:
          matchExpressions:                            # match nodes whose nodeenv label value is in ["xxx","yyy"] (no such node exists in the current environment)
          - key: nodeenv
            operator: In
            values: ["xxx","yyy"]
# Create the pod
[root@k8s-master01 ~]# kubectl create -f pod-nodeaffinity-preferred.yaml
pod/pod-nodeaffinity-preferred created

# Check the pod status (it runs successfully even though no node satisfies the preference)
[root@k8s-master01 ~]# kubectl get pod pod-nodeaffinity-preferred -n dev
NAME                         READY   STATUS    RESTARTS   AGE
pod-nodeaffinity-preferred   1/1     Running   0          40s
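For contrast with the soft rule above, the hard variant requiredDuringSchedulingIgnoredDuringExecution leaves the Pod in Pending when no node matches. A sketch reusing the same (non-existent) nodeenv labels — this manifest is illustrative and not part of the walkthrough:

apiVersion: v1
kind: Pod
metadata:
  name: pod-nodeaffinity-required   # illustrative name
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:  # hard requirement: scheduling fails if no node matches
        nodeSelectorTerms:
        - matchExpressions:
          - key: nodeenv
            operator: In
            values: ["xxx","yyy"]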
1) First create a reference (target) pod. Create pod-podaffinity-target.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-target
  namespace: dev
  labels:
    podenv: pro          # set a label on the target pod
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  nodeName: node1        # explicitly pin the target pod to node1
# Start the target pod
[root@k8s-master01 ~]# kubectl create -f pod-podaffinity-target.yaml
pod/pod-podaffinity-target created

# Check the pod status
[root@k8s-master01 ~]# kubectl get pods pod-podaffinity-target -n dev
NAME                     READY   STATUS    RESTARTS   AGE
pod-podaffinity-target   1/1     Running   0          4s
2) Create pod-podaffinity-required.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-podaffinity-required
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  affinity:                                            # affinity settings
    podAffinity:                                       # pod affinity
      requiredDuringSchedulingIgnoredDuringExecution:  # hard requirement
      - labelSelector:
          matchExpressions:                            # match pods whose podenv label value is in ["xxx","yyy"]
          - key: podenv
            operator: In
            values: ["xxx","yyy"]
        topologyKey: kubernetes.io/hostname
# Start the pod
[root@k8s-master01 ~]# kubectl create -f pod-podaffinity-required.yaml
pod/pod-podaffinity-required created

# Check the pod status: it is not running
[root@k8s-master01 ~]# kubectl get pods pod-podaffinity-required -n dev
NAME                       READY   STATUS    RESTARTS   AGE
pod-podaffinity-required   0/1     Pending   0          9s

# View the details
[root@k8s-master01 ~]# kubectl describe pods pod-podaffinity-required -n dev
......
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling        default-scheduler  0/3 nodes are available: 2 node(s) didn't match pod affinity rules, 1 node(s) had taints that the pod didn't tolerate.

# Next, change values: ["xxx","yyy"] to values: ["pro","yyy"]
# meaning: the new Pod must be placed on the same node as a pod carrying the label podenv=pro or podenv=yyy
[root@k8s-master01 ~]# vim pod-podaffinity-required.yaml

# Then delete and recreate the pod and check the result
[root@k8s-master01 ~]# kubectl delete -f pod-podaffinity-required.yaml
pod "pod-podaffinity-required" deleted
[root@k8s-master01 ~]# kubectl create -f pod-podaffinity-required.yaml
pod/pod-podaffinity-required created

# This time the Pod runs normally
[root@k8s-master01 ~]# kubectl get pods pod-podaffinity-required -n dev
NAME                       READY   STATUS    RESTARTS   AGE   LABELS
pod-podaffinity-required   1/1     Running   0          6s    <none>
Create pod-toleration.yaml with the following content:
apiVersion: v1
kind: Pod
metadata:
  name: pod-toleration
  namespace: dev
spec:
  containers:
  - name: nginx
    image: nginx:1.17.1
  tolerations:            # add a toleration
  - key: "tag"            # key of the taint to tolerate
    operator: "Equal"     # operator
    value: "heima"        # value of the taint to tolerate
    effect: "NoExecute"   # toleration rule; must match the effect of the taint set on the node
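A toleration only matters when a matching taint exists on a node. In this walkthrough node1 is assumed to have been tainted beforehand with a command along these lines (the exact command is not shown in this excerpt):

# Taint node1 so that pods without a matching toleration are evicted from / not scheduled onto it
[root@k8s-master01 ~]# kubectl taint nodes node1 tag=heima:NoExecute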
# The pod before the toleration was added
[root@k8s-master01 ~]# kubectl get pods -n dev -o wide
NAME             READY   STATUS    RESTARTS   AGE   IP       NODE     NOMINATED
pod-toleration   0/1     Pending   0          3s    <none>   <none>   <none>

# The pod after the toleration was added
[root@k8s-master01 ~]# kubectl get pods -n dev -o wide
NAME             READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED
pod-toleration   1/1     Running   0          3s    10.244.1.62   node1   <none>
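If you want to clean up afterwards (not part of the excerpt above), the same taint can be removed by appending a trailing minus sign to its specification:

# Remove the tag=heima:NoExecute taint from node1
[root@k8s-master01 ~]# kubectl taint nodes node1 tag=heima:NoExecute-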