All interaction with a Kubernetes cluster goes through the apiserver, so controlling who may do what against the API is especially important.
Since version 1.6, Kubernetes has enabled RBAC (Role-Based Access Control) as the default access-control policy.
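You can confirm that RBAC is enabled on your cluster by checking whether the apiserver serves the RBAC API group:
kubectl api-versions | grep rbac.authorization.k8s.io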
Key concepts:
RBAC has four top-level resources: Role, ClusterRole, RoleBinding, and ClusterRoleBinding.
Role:
A role, i.e. a named set of permission rules. Rules are purely additive (there are no deny rules). A Role is namespaced and only grants access within its own namespace.
ClusterRole:
Like a Role, but cluster-scoped: where a Role applies within a single namespace, a ClusterRole applies to the whole cluster, i.e. all namespaces.
RoleBinding:
Namespaced; binds a Role or a ClusterRole to a User, Group, or ServiceAccount. When a RoleBinding references a ClusterRole, the ClusterRole's rules are granted only within the binding's namespace.
ClusterRoleBinding:
Cluster-scoped; binds a ClusterRole to a User, Group, or ServiceAccount. (A ClusterRoleBinding can only reference a ClusterRole, not a namespaced Role.)
Users: k8s has two kinds of accounts: User and ServiceAccount. Users are for humans; ServiceAccounts are for processes. When a process running inside k8s needs to talk to the apiserver, we create a ServiceAccount for it, restrict that ServiceAccount's permissions with a Role, and finally attach the Role to the ServiceAccount with a RoleBinding.
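To make the pattern concrete, here is a minimal, illustrative Role/RoleBinding pair (names here are made up, not from the walkthrough below) that lets a ServiceAccount read pods in the default namespace:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-reader        # illustrative name
  namespace: default
rules:
- apiGroups: [""]         # "" selects the core API group
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: pod-reader-rb     # illustrative name
  namespace: default
subjects:
- kind: ServiceAccount
  name: my-sa
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: pod-reader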
Roles, clusterroles, rolebindings, clusterrolebindings, and serviceaccounts can all be created with kubectl create.
Create a ServiceAccount:
kubectl create sa <Name>
# or
kubectl create serviceaccount <Name>
# For example:
kubectl create serviceaccount my-sa
Create a Role:
kubectl create role <RoleName> [options]
Common options:
--resource=[]:
Resource that the rule applies to
--verb=[]:
Verb that applies to the resources contained in the rule
# For example: create a role named role-test that can operate on pods and deployments, with the verbs get, create, list, watch, update, and delete
kubectl create role role-test --resource=pod,deployment --verb=get,create,list,watch,update,delete
Create a RoleBinding:
kubectl create rolebinding <RBNAME> [options]
Common options:
--role='':
Role this RoleBinding should reference
--serviceaccount=[]:
Service accounts to bind to the role, in the format <namespace>:<name>. The flag can be repeated to add multiple service accounts.
--clusterrole='':
ClusterRole this RoleBinding should reference
# For example: bind role-test to my-sa; once bound, my-sa holds the permissions defined in the role (note the <namespace>:<name> format required by --serviceaccount)
kubectl create rolebinding rb-test --role=role-test --serviceaccount=default:my-sa
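After the binding exists, you can sanity-check the granted permissions by impersonating the ServiceAccount with kubectl auth can-i:
kubectl auth can-i get deployments --as=system:serviceaccount:default:my-sa    # expected: yes
kubectl auth can-i delete nodes --as=system:serviceaccount:default:my-sa       # expected: no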
Next we will use client-go to write a program that can create, update, get, and delete Deployments in the default namespace, and then run that program inside k8s as a Deployment of its own.
The code:
package main

import (
    "context"
    "net/http"

    "github.com/gin-gonic/gin"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    appsresv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/util/retry"
    "k8s.io/klog/v2"
)
func main() {
    // 1. Build the client config. Since this program runs inside k8s,
    // rest.InClusterConfig() picks up the ServiceAccount credentials that the
    // kubelet mounts into the pod (under /var/run/secrets/kubernetes.io/serviceaccount).
    config, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    // 2. Create the clientset.
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        panic(err)
    }
    // Typed client scoped to Deployments in the default namespace.
    deployClient := clientset.AppsV1().Deployments(corev1.NamespaceDefault)

    r := gin.Default()
    r.PUT("/deploy", func(c *gin.Context) {
        if err := CreateDeploy(deployClient); err != nil {
            JsonErr(c, err)
            return
        }
        c.JSON(http.StatusOK, gin.H{
            "message": "success",
        })
    })
    r.POST("/deploy", func(c *gin.Context) {
        if err := UpdateDeploy(deployClient); err != nil {
            JsonErr(c, err)
            return
        }
        c.JSON(http.StatusOK, gin.H{
            "message": "success",
        })
    })
    r.GET("/deploy", func(c *gin.Context) {
        if dep, err := ListDeploy(deployClient); err != nil {
            JsonErr(c, err)
        } else {
            if dep == nil {
                c.JSON(http.StatusOK, gin.H{
                    "message": "success",
                    "data":    "nil",
                })
                return
            }
            c.JSON(http.StatusOK, gin.H{
                "message": "success",
                "data": map[string]interface{}{
                    "name":     dep.Name,
                    "replicas": dep.Spec.Replicas,
                    "image":    dep.Spec.Template.Spec.Containers[0].Image,
                },
            })
        }
    })
    r.DELETE("/deploy", func(c *gin.Context) {
        if err := DeleteDeploy(deployClient); err != nil {
            JsonErr(c, err)
            return
        }
        c.JSON(http.StatusOK, gin.H{
            "message": "success",
        })
    })
    r.Run(":9000")
}
func JsonErr(ctx *gin.Context, err error) {
    ctx.JSON(http.StatusOK, gin.H{
        "message": err.Error(),
    })
}
func CreateDeploy(client appsresv1.DeploymentInterface) error {
    klog.Info("CreateDeploy...........")
    replicas := int32(2)
    // Desired state: 2 replicas of nginx:1.12, each exposing port 80.
    deploy := appsv1.Deployment{
        TypeMeta: v1.TypeMeta{
            Kind:       "Deployment",
            APIVersion: "apps/v1",
        },
        ObjectMeta: v1.ObjectMeta{
            Name:      "deploy-nginx-demo",
            Namespace: corev1.NamespaceDefault,
        },
        Spec: appsv1.DeploymentSpec{
            Replicas: &replicas,
            Selector: &v1.LabelSelector{
                MatchLabels: map[string]string{
                    "app": "nginx",
                },
            },
            Template: corev1.PodTemplateSpec{
                ObjectMeta: v1.ObjectMeta{
                    Name: "nginx",
                    Labels: map[string]string{
                        "app": "nginx",
                    },
                },
                Spec: corev1.PodSpec{
                    Containers: []corev1.Container{
                        {
                            Name:  "web",
                            Image: "nginx:1.12",
                            Ports: []corev1.ContainerPort{
                                {
                                    Protocol:      corev1.ProtocolTCP,
                                    ContainerPort: 80,
                                },
                            },
                        },
                    },
                },
            },
        },
    }
    dep, err := client.Create(context.Background(), &deploy, v1.CreateOptions{})
    if err != nil {
        klog.Errorf("create deployment error:%v", err)
        return err
    }
    klog.Infof("create deployment success, name:%s", dep.Name)
    return nil
}
func UpdateDeploy(client appsresv1.DeploymentInterface) error {
    klog.Info("UpdateDeploy...........")
    // When several clients modify the same resource concurrently, the update can
    // fail with a conflict error. RetryOnConflict re-runs the function in that
    // case; the retry backoff parameters come from retry.DefaultRetry.
    err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
        // Fetch the latest version of the Deployment to update.
        deploy, err := client.Get(context.Background(), "deploy-nginx-demo", v1.GetOptions{})
        if err != nil {
            klog.Errorf("can't get deployment, err:%v", err)
            return err // returning nil here would silently swallow the failure
        }
        // Modify the fields, then push the update.
        replicas := int32(1)
        deploy.Spec.Replicas = &replicas
        deploy.Spec.Template.Spec.Containers[0].Image = "nginx:1.13"
        _, err = client.Update(context.Background(), deploy, v1.UpdateOptions{})
        return err
    })
    if err != nil {
        klog.Errorf("update deployment error, err:%v", err)
        return err
    }
    klog.Infof("update deployment success")
    return nil
}
func ListDeploy(client appsresv1.DeploymentInterface) (*appsv1.Deployment, error) {
    klog.Info("ListDeploy...........")
    deplist, err := client.List(context.Background(), v1.ListOptions{})
    if err != nil {
        klog.Errorf("list deployment error, err:%v", err)
        return nil, err
    }
    for _, dep := range deplist.Items {
        klog.Infof("deploy name:%s, replicas:%d, container image:%s",
            dep.Name, *dep.Spec.Replicas, dep.Spec.Template.Spec.Containers[0].Image)
    }
    // Return the first Deployment found, or nil if the namespace has none.
    if len(deplist.Items) > 0 {
        return &deplist.Items[0], nil
    }
    return nil, nil
}
func DeleteDeploy(client appsresv1.DeploymentInterface) error {
    klog.Info("DeleteDeploy...........")
    // Deletion policy: with foreground propagation, dependents (ReplicaSets,
    // Pods) are removed before the Deployment object itself is deleted.
    deletePolicy := v1.DeletePropagationForeground
    err := client.Delete(context.Background(), "deploy-nginx-demo", v1.DeleteOptions{PropagationPolicy: &deletePolicy})
    if err != nil {
        klog.Errorf("delete deployment error, err:%v", err)
        return err
    }
    klog.Info("delete deployment success")
    return nil
}
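A note for local development: rest.InClusterConfig() only works inside a pod. If you want to run the same binary outside the cluster, a common pattern is to fall back to a kubeconfig file via the k8s.io/client-go/tools/clientcmd package. A minimal sketch (not part of the program above; the kubeconfig path is an assumption):

// buildConfig tries the in-cluster config first and falls back to a local
// kubeconfig, so the same binary can run inside or outside the cluster.
func buildConfig(kubeconfig string) (*rest.Config, error) {
    if cfg, err := rest.InClusterConfig(); err == nil {
        return cfg, nil
    }
    // kubeconfig is typically something like $HOME/.kube/config.
    return clientcmd.BuildConfigFromFlags("", kubeconfig)
}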
Dockerfile:
FROM golang:1.18 AS builder
WORKDIR /app
COPY . .
ENV GOPROXY=https://goproxy.cn
RUN CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -o app main.go
FROM alpine
WORKDIR /app
COPY --from=builder /app/app .
EXPOSE 9000
CMD ["./app"]
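The COPY . . step assumes the project root contains a go.mod. A plausible sketch (the module name and versions are assumptions; run go mod tidy to pin the real dependency set):

module incluster-test

go 1.18

require (
    github.com/gin-gonic/gin v1.8.1
    k8s.io/api v0.24.1
    k8s.io/apimachinery v0.24.1
    k8s.io/client-go v0.24.1
    k8s.io/klog/v2 v2.60.1
)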
1. Build the image from the Dockerfile:
docker build -t incluster_test .
2. Deploy it with a k8s Deployment; create deploy.yaml:
# Tip: writing YAML from scratch is tedious; generate a skeleton with kubectl, then edit it
kubectl create deploy incluster-test --replicas=1 --image=incluster_test --dry-run=client -o yaml > deploy.yaml
# After editing, deploy.yaml looks like this:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: incluster-test
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      serviceAccountName: incluster-test-sa
      containers:
      - name: test-incluster
        image: incluster_test
        imagePullPolicy: Never    # the image was built locally; never pull from a registry
        ports:
        - containerPort: 9000
3. Create the ServiceAccount: make a role directory and generate sa.yaml inside it:
mkdir role
cd role
kubectl create sa incluster-test-sa --dry-run=client -o yaml > sa.yaml
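The generated sa.yaml should look roughly like:

apiVersion: v1
kind: ServiceAccount
metadata:
  creationTimestamp: null
  name: incluster-test-sa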
4. Create the Role: generate role.yaml:
kubectl create role incluster-test-role --resource=deployment --verb=create,get,list,update,delete --dry-run=client -o yaml > role.yaml
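The generated role.yaml should look roughly like the following. Note that kubectl resolves deployments into the apps API group, which a hand-written Role must also specify:

apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  creationTimestamp: null
  name: incluster-test-role
rules:
- apiGroups:
  - apps
  resources:
  - deployments
  verbs:
  - create
  - get
  - list
  - update
  - delete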
5. Create the RoleBinding: generate rb.yaml:
kubectl create rolebinding incluster-test-rb --serviceaccount=default:incluster-test-sa --role=incluster-test-role --dry-run=client -o yaml > rb.yaml
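And rb.yaml should look roughly like:

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  creationTimestamp: null
  name: incluster-test-rb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: incluster-test-role
subjects:
- kind: ServiceAccount
  name: incluster-test-sa
  namespace: default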
6. Deploy everything:
# Run the following inside the role directory to create the ServiceAccount, Role, and RoleBinding
[root@master role]# kubectl create -f .
rolebinding.rbac.authorization.k8s.io/incluster-test-rb created
role.rbac.authorization.k8s.io/incluster-test-role created
serviceaccount/incluster-test-sa created
# Deploy the application's Deployment
[root@master test_incluster]# kubectl create -f deploy.yaml
deployment.apps/incluster-test created
# Check the deploy and pod
[root@master test_incluster]# kubectl get deploy,pod
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/incluster-test 1/1 1 1 98s
NAME READY STATUS RESTARTS AGE
pod/incluster-test-779cc788f4-dfntb 1/1 Running 0 98s
# Find the pod IP so we can reach the service with curl
[root@master test_incluster]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
incluster-test-779cc788f4-dfntb 1/1 Running 0 2m6s 10.244.0.40 master <none> <none>
# Create the demo deploy with a PUT request
[root@master test_incluster]# curl -X PUT 10.244.0.40:9000/deploy
{"message":"success"}
# View the Deployment with GET
[root@master test_incluster]# curl 10.244.0.40:9000/deploy
{"data":{"image":"nginx:1.12","name":"deploy-nginx-demo","replicas":2},"message":"success"}
# Update the Deployment with POST
[root@master test_incluster]# curl -X POST 10.244.0.40:9000/deploy
{"message":"success"}
[root@master test_incluster]# curl 10.244.0.40:9000/deploy
{"data":{"image":"nginx:1.13","name":"deploy-nginx-demo","replicas":1},"message":"success"}
# Delete the Deployment with DELETE
[root@master test_incluster]# curl -X DELETE 10.244.0.40:9000/deploy
{"message":"success"}