Pod creation, deletion, and modification: the flow, step by step
1. When a kubectl command is run, it loads "~/.kube/config" to find the apiserver address, port, and authentication certificates.
2. The apiserver performs certificate authentication, authorization, and validation of the request; if these succeed, data can be read or written.
3. If the request is a write operation (create, modify, delete), the corresponding records in the etcd database are updated.
4. When a Pod is created, the scheduler is responsible for placing it: it picks a suitable worker node and returns the result to the apiserver, which persists it to etcd.
5. The kubelet reports to the apiserver periodically, including the container resources inside Pods (cpu, memory, disk, gpu, ...) and the worker node's own status; the apiserver stores these results in etcd. Any work bound to that node is also returned to the node for execution.
6. The kubelet then calls the CRI to create the containers (first the pause container, then the initContainers, then the regular containers).
7. While running, if a Pod's container exits, normally or abnormally, the kubelet decides whether to restart it according to the restart policy (Never, Always, OnFailure).
8. If a node goes down, the controller manager steps in: for example, when a workload's replica count falls short, its watch on the apiserver notices that the controller's desired replica count is no longer met, so new Pods are created, repeating steps 4-6.
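A quick way to observe this flow on a live cluster is to create a throw-away Pod and read the events that the scheduler and kubelet record. This is only a sketch; the Pod name "demo-flow" is made up, and the image is the one used elsewhere in these notes:

kubectl run demo-flow --image=registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
# the Events section typically shows the sequence Scheduled -> Pulling/Pulled -> Created -> Started
kubectl describe pod demo-flow | grep -A 10 Events
kubectl delete pod demo-flow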
Pod scheduling with nodeName
[root@master231 scheduler-pods]# cat 01-scheduler-nodeName.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian
  labels:
    apps: xiuxian
spec:
  replicas: 5
  selector:
    matchLabels:
      version: v1
  template:
    metadata:
      labels:
        version: v1
        school: violet
        class: linux96
    spec:
      # Pin all Pods to this node; setting nodeName bypasses the kube-scheduler.
      nodeName: worker233
      containers:
      - image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
        name: xiuxian
[root@master231 scheduler-pods]#
[root@master231 scheduler-pods]# kubectl apply -f 01-scheduler-nodeName.yaml
deployment.apps/deploy-xiuxian created
[root@master231 scheduler-pods]#
[root@master231 scheduler-pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deploy-xiuxian-69859855db-26c6j 1/1 Running 0 3s 10.100.140.72 worker233 <none> <none>
deploy-xiuxian-69859855db-hmsd2 1/1 Running 0 3s 10.100.140.69 worker233 <none> <none>
deploy-xiuxian-69859855db-jhmnt 1/1 Running 0 3s 10.100.140.71 worker233 <none> <none>
deploy-xiuxian-69859855db-jnjkd 1/1 Running 0 3s 10.100.140.65 worker233 <none> <none>
deploy-xiuxian-69859855db-p8l57 1/1 Running 0 3s 10.100.140.70 worker233 <none> <none>
[root@master231 scheduler-pods]#
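Because nodeName is written directly into the Pod spec, these Pods bypass the kube-scheduler entirely; the kubelet on worker233 simply picks them up. A quick way to confirm the field (the Pod name is taken from the listing above):

kubectl get pod deploy-xiuxian-69859855db-26c6j -o jsonpath='{.spec.nodeName}{"\n"}'
# should print: worker233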
Pod scheduling with hostPort
[root@master231 scheduler-pods]# cat 02-scheduler-ports.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-ports
  labels:
    apps: xiuxian
spec:
  replicas: 5
  selector:
    matchLabels:
      version: v1
  template:
    metadata:
      labels:
        version: v1
        school: violet
        class: linux96
    spec:
      containers:
      - image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
        name: xiuxian
        # expose container ports
        ports:
        # expose port 80 of the container
        - containerPort: 80
          # and map it to port 81 on the host
          hostPort: 81
[root@master231 scheduler-pods]#
[root@master231 scheduler-pods]# kubectl apply -f 02-scheduler-ports.yaml
deployment.apps/deploy-xiuxian-ports created
[root@master231 scheduler-pods]#
[root@master231 scheduler-pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deploy-xiuxian-ports-98b55cf8-6j584 0/1 Pending 0 3s <none> <none> <none> <none>
deploy-xiuxian-ports-98b55cf8-cvw7v 1/1 Running 0 3s 10.100.140.73 worker233 <none> <none>
deploy-xiuxian-ports-98b55cf8-qrclr 1/1 Running 0 3s 10.100.203.171 worker232 <none> <none>
deploy-xiuxian-ports-98b55cf8-vwdpt 0/1 Pending 0 3s <none> <none> <none> <none>
deploy-xiuxian-ports-98b55cf8-z5mzp 0/1 Pending 0 3s <none> <none> <none> <none>
[root@master231 scheduler-pods]#
Access test:
http://10.0.0.232:81/
http://10.0.0.233:81/
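Only two of the five replicas reach Running because each Pod claims port 81 on its host, so the scheduler can place at most one such Pod per worker node (and the master carries a NoSchedule taint); the other three stay Pending. A minimal check from any host that can reach the nodes, assuming curl is installed there:

curl -s http://10.0.0.232:81/ | head -n 5
curl -s http://10.0.0.233:81/ | head -n 5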
Pod scheduling with hostNetwork
[root@master231 scheduler-pods]# cat 03-scheduler-hostNetwork.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-hostnetwork
  labels:
    apps: xiuxian
spec:
  replicas: 5
  selector:
    matchLabels:
      version: v1
  template:
    metadata:
      labels:
        version: v1
        school: violet
        class: linux96
    spec:
      # share the node's network namespace; the Pod uses the node's IP directly
      hostNetwork: true
      containers:
      - image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
        name: xiuxian
        ports:
        - containerPort: 80
[root@master231 scheduler-pods]#
[root@master231 scheduler-pods]# kubectl apply -f 03-scheduler-hostNetwork.yaml
deployment.apps/deploy-xiuxian-hostnetwork created
[root@master231 scheduler-pods]#
[root@master231 scheduler-pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deploy-xiuxian-hostnetwork-67ff44df9b-bfxdm 1/1 Running 0 3s 10.0.0.232 worker232 <none> <none>
deploy-xiuxian-hostnetwork-67ff44df9b-ftnwc 0/1 Pending 0 3s <none> <none> <none> <none>
deploy-xiuxian-hostnetwork-67ff44df9b-lwzqn 0/1 Pending 0 3s <none> <none> <none> <none>
deploy-xiuxian-hostnetwork-67ff44df9b-mfk6g 0/1 Pending 0 3s <none> <none> <none> <none>
deploy-xiuxian-hostnetwork-67ff44df9b-zdbmk 1/1 Running 0 3s 10.0.0.233 worker233 <none> <none>
[root@master231 scheduler-pods]#
Access test:
http://10.0.0.232
http://10.0.0.233
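The result mirrors the hostPort case: every Pod wants port 80 of its host, so only one replica fits per worker node and the rest stay Pending, and the running Pods show the node IPs (10.0.0.232/233) instead of Pod-network IPs. To double-check that the port is bound on the host itself (assuming the ss tool is present on the node):

ss -ntlp | grep ':80 '            # run this on worker232 or worker233
curl -sI http://10.0.0.233/ | head -n 1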
DNS resolution policy (dnsPolicy)
[root@master231 deployments]# cat 05-deploy-dnsPolicy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-dnspolicy
  labels:
    apps: xiuxian
spec:
  replicas: 5
  selector:
    matchLabels:
      version: v1
  template:
    metadata:
      labels:
        version: v1
        school: violet
        class: linux96
    spec:
      # DNS resolution policy; valid values: 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
      # The default is ClusterFirst, which resolves through the cluster DNS first, i.e. our CoreDNS service.
      # dnsPolicy:
      containers:
      - image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
        name: xiuxian
[root@master231 deployments]#
[root@master231 deployments]# kubectl apply -f 05-deploy-dnsPolicy.yaml
deployment.apps/deploy-xiuxian-dnspolicy created
[root@master231 deployments]#
[root@master231 deployments]#
[root@master231 deployments]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deploy-xiuxian-dnspolicy-9ddcfd7db-6mpsc 1/1 Running 0 3s 10.100.140.79 worker233 <none> <none>
deploy-xiuxian-dnspolicy-9ddcfd7db-gfjbw 1/1 Running 0 3s 10.100.203.174 worker232 <none> <none>
deploy-xiuxian-dnspolicy-9ddcfd7db-pctmz 1/1 Running 0 3s 10.100.203.176 worker232 <none> <none>
deploy-xiuxian-dnspolicy-9ddcfd7db-rf8nw 1/1 Running 0 3s 10.100.140.78 worker233 <none> <none>
deploy-xiuxian-dnspolicy-9ddcfd7db-zf5sn 1/1 Running 0 3s 10.100.140.77 worker233 <none> <none>
[root@master231 deployments]#
[root@master231 deployments]# kubectl get pods deploy-xiuxian-dnspolicy-9ddcfd7db-6mpsc -o yaml | grep dnsPolicy
dnsPolicy: ClusterFirst
[root@master231 deployments]#
[root@master231 deployments]# kubectl get pods -o yaml | grep dnsPolicy
dnsPolicy: ClusterFirst
dnsPolicy: ClusterFirst
dnsPolicy: ClusterFirst
dnsPolicy: ClusterFirst
dnsPolicy: ClusterFirst
[root@master231 deployments]#
[root@master231 deployments]#
[root@master231 deployments]# kubectl get svc -A
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
calico-apiserver calico-api ClusterIP 10.200.87.86 <none> 443/TCP 2d6h
calico-system calico-kube-controllers-metrics ClusterIP None <none> 9094/TCP 2d6h
calico-system calico-typha ClusterIP 10.200.43.178 <none> 5473/TCP 2d6h
default baidu ExternalName <none> www.cnblogs.com <none> 137m
default ep-db ClusterIP 10.200.2.2 <none> 3306/TCP 116m
default kubernetes ClusterIP 10.200.0.1 <none> 443/TCP 2d7h
default rc-xiuxian ClusterIP 10.200.196.245 <none> 80/TCP 26h
default svc-externalname ExternalName <none> www.cnblogs.com <none> 136m
default svc-xiuxian ClusterIP 10.200.25.148 <none> 90/TCP 26h
default svc-xiuxian-nodeport NodePort 10.200.21.38 <none> 90:8080/TCP 24h
kube-system kube-dns ClusterIP 10.200.0.10 <none> 53/UDP,53/TCP,9153/TCP 2d7h
metallb-system metallb-webhook-service ClusterIP 10.200.27.157 <none> 443/TCP 6h6m
violet svc-xiuxian-lb LoadBalancer 10.200.150.228 10.0.0.150 90:30120/TCP 6h
[root@master231 deployments]#
[root@master231 deployments]# kubectl exec -it deploy-xiuxian-dnspolicy-9ddcfd7db-6mpsc -- sh
/ # ping ep-db
PING ep-db (10.200.2.2): 56 data bytes
64 bytes from 10.200.2.2: seq=0 ttl=64 time=0.165 ms
^C
--- ep-db ping statistics ---
1 packets transmitted, 1 packets received, 0% packet loss
round-trip min/avg/max = 0.165/0.165/0.165 ms
/ #
/ # cat /etc/resolv.conf
nameserver 10.200.0.10
search default.svc.violet.com svc.violet.com violet.com
options ndots:5
/ #
- 2. However, when hostNetwork is used, the default policy no longer takes effect: as shown below, the Pod inherits the node's resolv.conf and can no longer resolve cluster Services.
[root@master231 deployments]# kubectl delete -f 05-deploy-dnsPolicy.yaml
deployment.apps "deploy-xiuxian-dnspolicy" deleted
[root@master231 deployments]#
[root@master231 deployments]# vim 05-deploy-dnsPolicy.yaml
[root@master231 deployments]#
[root@master231 deployments]# cat 05-deploy-dnsPolicy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-dnspolicy
  labels:
    apps: xiuxian
spec:
  replicas: 1
  selector:
    matchLabels:
      version: v1
  template:
    metadata:
      labels:
        version: v1
        school: violet
        class: linux96
    spec:
      # DNS resolution policy; valid values: 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
      # The default is ClusterFirst, which resolves through the cluster DNS first, i.e. our CoreDNS service.
      # dnsPolicy:
      hostNetwork: true
      containers:
      - image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
        name: xiuxian
[root@master231 deployments]#
[root@master231 deployments]# kubectl apply -f 05-deploy-dnsPolicy.yaml
deployment.apps/deploy-xiuxian-dnspolicy created
[root@master231 deployments]#
[root@master231 deployments]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deploy-xiuxian-dnspolicy-8564d5dc99-8sccq 1/1 Running 0 3s 10.0.0.233 worker233 <none> <none>
[root@master231 deployments]#
[root@master231 deployments]# kubectl get pods -o yaml | grep dnsPolicy
dnsPolicy: ClusterFirst
[root@master231 deployments]#
[root@master231 deployments]# kubectl exec -it deploy-xiuxian-dnspolicy-8564d5dc99-8sccq -- sh
/ #
/ # cat /etc/resolv.conf
nameserver 223.5.5.5
nameserver 223.6.6.6
search
/ #
/ # ping ep-db
ping: bad address 'ep-db'
/ #
[root@master231 deployments]# kubectl delete -f 05-deploy-dnsPolicy.yaml
deployment.apps "deploy-xiuxian-dnspolicy" deleted
[root@master231 deployments]#
[root@master231 deployments]# cat 05-deploy-dnsPolicy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-dnspolicy
  labels:
    apps: xiuxian
spec:
  replicas: 1
  selector:
    matchLabels:
      version: v1
  template:
    metadata:
      labels:
        version: v1
        school: violet
        class: linux96
    spec:
      # DNS resolution policy; valid values: 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
      # The default is ClusterFirst, which resolves through the cluster DNS first, i.e. our CoreDNS service.
      # 'ClusterFirstWithHostNet' is normally paired with "hostNetwork: true" and still prefers the cluster's CoreDNS.
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      containers:
      - image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
        name: xiuxian
[root@master231 deployments]#
[root@master231 deployments]# kubectl apply -f 05-deploy-dnsPolicy.yaml
deployment.apps/deploy-xiuxian-dnspolicy created
[root@master231 deployments]#
[root@master231 deployments]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deploy-xiuxian-dnspolicy-6d8fd88c4b-cz2jv 1/1 Running 0 5s 10.0.0.233 worker233 <none> <none>
[root@master231 deployments]#
[root@master231 deployments]# kubectl get pods -o yaml | grep dnsPolicy
dnsPolicy: ClusterFirstWithHostNet
[root@master231 deployments]#
[root@master231 deployments]# kubectl exec -it deploy-xiuxian-dnspolicy-6d8fd88c4b-cz2jv -- sh
/ # cat /etc/resolv.conf
nameserver 10.200.0.10
search default.svc.violet.com svc.violet.com violet.com
options ndots:5
/ #
/ # ping ep-db
PING ep-db (10.200.2.2): 56 data bytes
64 bytes from 10.200.2.2: seq=0 ttl=64 time=0.303 ms
64 bytes from 10.200.2.2: seq=1 ttl=64 time=0.109 ms
^C
--- ep-db ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.109/0.206/0.303 ms
/ #
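The remaining policy, 'None', was not demonstrated above: it ignores both the cluster DNS and the node's resolv.conf and builds the Pod's /etc/resolv.conf purely from a dnsConfig block you supply. A minimal sketch, with an assumed Deployment name, nameserver and search domain chosen only for illustration:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: deploy-xiuxian-dnsnone
spec:
  replicas: 1
  selector:
    matchLabels:
      version: v1
  template:
    metadata:
      labels:
        version: v1
    spec:
      # 'None' means: do not inherit any DNS settings; use dnsConfig below as-is
      dnsPolicy: None
      dnsConfig:
        nameservers:
        - 223.5.5.5
        searches:
        - violet.com
        options:
        - name: ndots
          value: "2"
      containers:
      - image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
        name: xiuxian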
Pod scheduling with nodeSelector
As the name suggests, nodeSelector chooses the nodes a Pod may be scheduled onto based on node labels.
2.1 Environment preparation
[root@master231 ~]# kubectl label nodes master231 school=beijing
node/master231 labeled
[root@master231 ~]#
[root@master231 ~]# kubectl label nodes worker232 school=shanghai
node/worker232 labeled
[root@master231 ~]#
[root@master231 ~]# kubectl label nodes worker233 school=shenzhen
node/worker233 labeled
[root@master231 ~]#
[root@master231 ~]# kubectl get nodes -l school=shanghai
NAME STATUS ROLES AGE VERSION
worker232 Ready <none> 2d23h v1.23.17
[root@master231 ~]#
[root@master231 ~]# kubectl get nodes -l school=shanghai -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
worker232 Ready <none> 2d23h v1.23.17 10.0.0.232 <none> Ubuntu 22.04.4 LTS 5.15.0-119-generic docker://20.10.24
[root@master231 ~]#
2.2 Write the resource manifest
[root@master231 scheduler]# cat 01-deploy-nodeSelector.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scheduler-nodeselector
spec:
  replicas: 5
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      # schedule the Pods onto nodes selected by label
      nodeSelector:
        school: shanghai
      containers:
      - image: harbor250.violet.com/violet-xiuxian/apps:v1
        name: c1
[root@master231 scheduler]#
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl apply -f 01-deploy-nodeSelector.yaml
deployment.apps/scheduler-nodeselector created
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-nodeselector-774bf9875f-lktpk 1/1 Running 0 5s 10.100.203.136 worker232 <none> <none>
scheduler-nodeselector-774bf9875f-n86nf 1/1 Running 0 5s 10.100.203.139 worker232 <none> <none>
scheduler-nodeselector-774bf9875f-q9w9l 1/1 Running 0 5s 10.100.203.140 worker232 <none> <none>
scheduler-nodeselector-774bf9875f-v6774 1/1 Running 0 5s 10.100.203.137 worker232 <none> <none>
scheduler-nodeselector-774bf9875f-xj7jf 1/1 Running 0 5s 10.100.203.138 worker232 <none> <none>
[root@master231 scheduler]#
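Two small notes on nodeSelector. The label being selected on is easier to inspect with the -L column flag, and if no node carried the requested label the Pods would simply stay Pending (a describe on one of them would show a FailedScheduling event saying no node matched). For example:

kubectl get nodes -L school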
Pod scheduling with taints
Taints are normally applied to worker nodes and influence how Pods are scheduled.
The taint syntax is as follows (see the command sketch after the field descriptions):
key[=value]:effect
Field descriptions:
key:
Must start with a letter or digit and may contain letters, digits, hyphens (-), dots (.) and underscores (_), up to 253 characters.
It may also start with a DNS subdomain prefix followed by a single "/".
value:
Optional. If given, it must start with a letter or digit and may contain letters, digits, hyphens, dots and underscores, up to 63 characters.
effect:
Must be NoSchedule, PreferNoSchedule or NoExecute.
NoSchedule:
The node no longer accepts new Pods, but Pods already scheduled onto it are not evicted.
PreferNoSchedule:
The node can still accept Pods, but the scheduler tries to place them on other nodes first; in other words, the node's scheduling priority is lowered.
NoExecute:
The node no longer accepts new Pods and, in addition, Pods already scheduled onto it are evicted immediately.
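On the command line the syntax above maps to kubectl taint as follows; the key and value here are only illustrative, and the concrete commands used in this environment follow in the next subsections:

kubectl taint nodes <node-name> class=linux96:NoSchedule      # add a taint
kubectl taint nodes <node-name> class=linux96:NoSchedule-     # remove it (note the trailing "-")
kubectl taint nodes <node-name> class:NoExecute               # the value part may be omitted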
2.1 View taints
[root@master231 scheduler]# kubectl describe nodes | grep Taints
Taints: node-role.kubernetes.io/master:NoSchedule
Taints: <none>
Taints: <none>
[root@master231 scheduler]#
2.2 Taint a node
[root@master231 scheduler]# kubectl taint node worker232 class=linux96:NoSchedule
node/worker232 tainted
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints
Taints: node-role.kubernetes.io/master:NoSchedule
Taints: class=linux96:NoSchedule
Taints: <none>
[root@master231 scheduler]#
2.3 Remove a taint
[root@master231 scheduler]# kubectl describe nodes | grep Taints
Taints: node-role.kubernetes.io/master:NoSchedule
Taints: class=linux96:NoSchedule
Taints: <none>
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl taint node worker232 class=linux96:NoSchedule-
node/worker232 untainted
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints
Taints: node-role.kubernetes.io/master:NoSchedule
Taints: <none>
Taints: <none>
[root@master231 scheduler]#
2.4 A node can carry multiple taints
[root@master231 scheduler]# kubectl get pods -o wide # the Pods on worker232 are running normally (Running)
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-nodeselector-774bf9875f-956jk 1/1 Running 0 4s 10.100.203.146 worker232 <none> <none>
scheduler-nodeselector-774bf9875f-lktpk 1/1 Running 0 22m 10.100.203.136 worker232 <none> <none>
scheduler-nodeselector-774bf9875f-lm7g5 1/1 Running 0 4s 10.100.203.147 worker232 <none> <none>
scheduler-nodeselector-774bf9875f-n86nf 1/1 Running 0 22m 10.100.203.139 worker232 <none> <none>
scheduler-nodeselector-774bf9875f-q9w9l 1/1 Running 0 22m 10.100.203.140 worker232 <none> <none>
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl taint node worker232 class=linux96:PreferNoSchedule
node/worker232 tainted
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl taint node worker232 class=linux96:NoExecute
node/worker232 tainted
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2 # clearly, a node can carry multiple taints
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: class=linux96:NoExecute
class=linux96:PreferNoSchedule
Unschedulable: false
--
Taints: <none>
Unschedulable: false
Lease:
[root@master231 scheduler]#
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide # because of the NoExecute taint, the Pods on worker232 have all been evicted
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-nodeselector-774bf9875f-krhz5 0/1 Pending 0 15s <none> <none> <none> <none>
scheduler-nodeselector-774bf9875f-l862v 0/1 Pending 0 15s <none> <none> <none> <none>
scheduler-nodeselector-774bf9875f-mxjzp 0/1 Pending 0 15s <none> <none> <none> <none>
scheduler-nodeselector-774bf9875f-vptc8 0/1 Pending 0 15s <none> <none> <none> <none>
scheduler-nodeselector-774bf9875f-z6rtd 0/1 Pending 0 15s <none> <none> <none> <none>
[root@master231 scheduler]#
2.5 Modify a taint [same key and value but a different effect means a different taint; same key and same effect means the same taint!]
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: class=linux96:NoExecute
class=linux96:PreferNoSchedule
Unschedulable: false
--
Taints: <none>
Unschedulable: false
Lease:
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl taint node worker232 class=LINUX96:NoExecute --overwrite
node/worker232 modified
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: class=LINUX96:NoExecute
class=linux96:PreferNoSchedule
Unschedulable: false
--
Taints: <none>
Unschedulable: false
Lease:
[root@master231 scheduler]#
Pod scheduling with taint tolerations
[root@master231 scheduler]# kubectl taint node worker233 school=violet:NoSchedule
node/worker233 tainted
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: class=LINUX96:NoExecute
class=linux96:PreferNoSchedule
Unschedulable: false
--
Taints: school=violet:NoSchedule
Unschedulable: false
Lease:
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl label nodes worker233 school=beijing --overwrite
node/worker233 unlabeled
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes -l school --show-labels | grep school
master231 Ready control-plane,master 3d v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master231,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=,school=beijing
worker232 Ready <none> 3d v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker232,kubernetes.io/os=linux,school=shanghai
worker233 Ready <none> 3d v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker233,kubernetes.io/os=linux,school=beijing
[root@master231 scheduler]#
[root@master231 scheduler]# cat 02-deploy-nodeSelector-tolerations.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scheduler-nodeselector-tolerations
spec:
  replicas: 5
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      # Configure tolerations for the taints on all three nodes.
      # Note: for a Pod to be schedulable onto a node, it must tolerate ALL taints on that node!
      tolerations:
        # the taint key to match
      - key: node-role.kubernetes.io/master
        # the relationship between key and value; valid values: Exists and Equal.
        # With Exists the value must be empty; with Equal a value field must be defined.
        operator: Exists
        # an empty effect matches taints with any effect
        effect: NoSchedule
      - key: class
        operator: Exists
      - key: school
        # the taint value to match; an empty value matches any value
        value: violet
        effect: NoSchedule
        operator: Equal
      nodeSelector:
        school: beijing
      containers:
      - image: harbor250.violet.com/violet-xiuxian/apps:v1
        name: c1
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl apply -f 02-deploy-nodeSelector-tolerations.yaml
deployment.apps/scheduler-nodeselector-tolerations configured
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-nodeselector-tolerations-ccc7956bd-29gfc 1/1 Running 0 7s 10.100.160.143 master231 <none> <none>
scheduler-nodeselector-tolerations-ccc7956bd-8htmd 1/1 Running 0 7s 10.100.140.75 worker233 <none> <none>
scheduler-nodeselector-tolerations-ccc7956bd-pw6cq 1/1 Running 0 9s 10.100.140.73 worker233 <none> <none>
scheduler-nodeselector-tolerations-ccc7956bd-q99sh 1/1 Running 0 9s 10.100.160.142 master231 <none> <none>
scheduler-nodeselector-tolerations-ccc7956bd-tp4jd 1/1 Running 0 9s 10.100.140.74 worker233 <none> <none>
[root@master231 scheduler]#
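As a side note, a toleration that specifies neither key nor effect tolerates every taint, which is occasionally useful for cluster-wide agents (use with care). A minimal fragment that would sit under the Pod template's spec:

      tolerations:
      - operator: Exists    # empty key and empty effect: matches every taint on every node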
Pod scheduling with cordon
cordon marks a node as unschedulable and adds the corresponding taint to it.
2.1 Environment preparation
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: class=LINUX96:NoExecute
class=linux96:PreferNoSchedule
Unschedulable: false
--
Taints: school=violet:NoSchedule
Unschedulable: false
Lease:
[root@master231 scheduler]#
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl taint node worker232 class-
node/worker232 untainted
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl taint node worker233 school-
node/worker233 untainted
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
[root@master231 scheduler]#
2.2 Create the test environment
[root@master231 scheduler]# cat 03-deploy-resources.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scheduler-resources
spec:
  replicas: 5
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - image: harbor250.violet.com/violet-xiuxian/apps:v1
        name: c1
        resources:
          # with only limits set, requests default to the same values (Guaranteed QoS)
          limits:
            cpu: 0.5
            memory: 200Mi
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl apply -f 03-deploy-resources.yaml
deployment.apps/scheduler-resources created
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-resources-6d6785785-kxlmd 1/1 Running 0 4s 10.100.160.144 master231 <none> <none>
scheduler-resources-6d6785785-nn8mb 1/1 Running 0 4s 10.100.140.76 worker233 <none> <none>
scheduler-resources-6d6785785-qw822 1/1 Running 0 4s 10.100.203.148 worker232 <none> <none>
scheduler-resources-6d6785785-s86pt 1/1 Running 0 4s 10.100.140.77 worker233 <none> <none>
scheduler-resources-6d6785785-vrn87 1/1 Running 0 4s 10.100.203.149 worker232 <none> <none>
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master231 Ready control-plane,master 3d v1.23.17
worker232 Ready <none> 3d v1.23.17
worker233 Ready <none> 3d v1.23.17
[root@master231 scheduler]#
2.3 Mark the node as unschedulable
[root@master231 scheduler]# kubectl cordon worker233
node/worker233 cordoned
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes # the node is now marked as unschedulable (SchedulingDisabled)
NAME STATUS ROLES AGE VERSION
master231 Ready control-plane,master 3d v1.23.17
worker232 Ready <none> 3d v1.23.17
worker233 Ready,SchedulingDisabled <none> 3d v1.23.17
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-resources-6d6785785-kxlmd 1/1 Running 0 33s 10.100.160.144 master231 <none> <none>
scheduler-resources-6d6785785-nn8mb 1/1 Running 0 33s 10.100.140.76 worker233 <none> <none>
scheduler-resources-6d6785785-qw822 1/1 Running 0 33s 10.100.203.148 worker232 <none> <none>
scheduler-resources-6d6785785-s86pt 1/1 Running 0 33s 10.100.140.77 worker233 <none> <none>
scheduler-resources-6d6785785-vrn87 1/1 Running 0 33s 10.100.203.149 worker232 <none> <none>
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
--
Taints: node.kubernetes.io/unschedulable:NoSchedule # note the extra taint that has appeared
Unschedulable: true
Lease:
[root@master231 scheduler]#
2.4 Verification
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl scale --replicas=10 deployment scheduler-resources
deployment.apps/scheduler-resources scaled
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-resources-6d6785785-jvv29 0/1 Pending 0 8s <none> <none> <none> <none>
scheduler-resources-6d6785785-kdqvm 0/1 Pending 0 8s <none> <none> <none> <none>
scheduler-resources-6d6785785-kxlmd 1/1 Running 0 2m26s 10.100.160.144 master231 <none> <none>
scheduler-resources-6d6785785-l84tb 1/1 Running 0 8s 10.100.203.150 worker232 <none> <none>
scheduler-resources-6d6785785-ltdn5 1/1 Running 0 8s 10.100.160.145 master231 <none> <none>
scheduler-resources-6d6785785-nn8mb 1/1 Running 0 2m26s 10.100.140.76 worker233 <none> <none>
scheduler-resources-6d6785785-qw822 1/1 Running 0 2m26s 10.100.203.148 worker232 <none> <none>
scheduler-resources-6d6785785-s86pt 1/1 Running 0 2m26s 10.100.140.77 worker233 <none> <none>
scheduler-resources-6d6785785-swjl4 1/1 Running 0 8s 10.100.203.151 worker232 <none> <none>
scheduler-resources-6d6785785-vrn87 1/1 Running 0 2m26s 10.100.203.149 worker232 <none> <none>
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe pod scheduler-resources-6d6785785-jvv29
Name: scheduler-resources-6d6785785-jvv29
Namespace: default
Priority: 0
Node: <none>
Labels: apps=xiuxian
pod-template-hash=6d6785785
Annotations: <none>
Status: Pending
IP:
IPs: <none>
Controlled By: ReplicaSet/scheduler-resources-6d6785785
Containers:
c1:
Image: harbor250.violet.com/violet-xiuxian/apps:v1
Port: <none>
Host Port: <none>
Limits:
cpu: 500m
memory: 200Mi
Requests:
cpu: 500m
memory: 200Mi
Environment: <none>
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-sdz86 (ro)
Conditions:
Type Status
PodScheduled False
Volumes:
kube-api-access-sdz86:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: Guaranteed
Node-Selectors: <none>
Tolerations: node-role.kubernetes.io/master:NoSchedule op=Exists
node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 35s default-scheduler 0/3 nodes are available: 1 node(s) were unschedulable, 2 Insufficient cpu.
[root@master231 scheduler]#
Tip:
The reason master231 cannot host the remaining Pods is that the four static-Pod control-plane components already consume 0.65 CPU cores; after two of these Pods (0.5 core each) are placed there, the total requested CPU reaches 1.65 cores, so a third Pod no longer fits.
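A quick way to verify that arithmetic is to look at the node's allocated CPU requests, for example:

kubectl describe node master231 | grep -A 8 'Allocated resources'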
Pod scheduling with uncordon
uncordon simply reverses cordon; in plain terms, it makes the node schedulable again.
2.1 Check the current environment
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
--
Taints: node.kubernetes.io/unschedulable:NoSchedule
Unschedulable: true
Lease:
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master231 Ready control-plane,master 3d v1.23.17
worker232 Ready <none> 3d v1.23.17
worker233 Ready,SchedulingDisabled <none> 3d v1.23.17
[root@master231 scheduler]#
2.2 Make the node schedulable again
[root@master231 scheduler]# kubectl uncordon worker233
node/worker233 uncordoned
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master231 Ready control-plane,master 3d v1.23.17
worker232 Ready <none> 3d v1.23.17
worker233 Ready <none> 3d v1.23.17
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
[root@master231 scheduler]#
2.3 Check again: the pending Pods have now been scheduled successfully!
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-resources-6d6785785-jvv29 1/1 Running 0 8m1s 10.100.140.78 worker233 <none> <none>
scheduler-resources-6d6785785-kdqvm 1/1 Running 0 8m1s 10.100.140.79 worker233 <none> <none>
scheduler-resources-6d6785785-kxlmd 1/1 Running 0 10m 10.100.160.144 master231 <none> <none>
scheduler-resources-6d6785785-l84tb 1/1 Running 0 8m1s 10.100.203.150 worker232 <none> <none>
scheduler-resources-6d6785785-ltdn5 1/1 Running 0 8m1s 10.100.160.145 master231 <none> <none>
scheduler-resources-6d6785785-nn8mb 1/1 Running 0 10m 10.100.140.76 worker233 <none> <none>
scheduler-resources-6d6785785-qw822 1/1 Running 0 10m 10.100.203.148 worker232 <none> <none>
scheduler-resources-6d6785785-s86pt 1/1 Running 0 10m 10.100.140.77 worker233 <none> <none>
scheduler-resources-6d6785785-swjl4 1/1 Running 0 8m1s 10.100.203.151 worker232 <none> <none>
scheduler-resources-6d6785785-vrn87 1/1 Running 0 10m 10.100.203.149 worker232 <none> <none>
[root@master231 scheduler]#
Pod scheduling with drain
drain evicts all Pods already scheduled onto the node; under the hood it calls cordon first.
2.1 Check the current environment
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-resources-6d6785785-jvv29 1/1 Running 0 10m 10.100.140.78 worker233 <none> <none>
scheduler-resources-6d6785785-kdqvm 1/1 Running 0 10m 10.100.140.79 worker233 <none> <none>
scheduler-resources-6d6785785-kxlmd 1/1 Running 0 12m 10.100.160.144 master231 <none> <none>
scheduler-resources-6d6785785-l84tb 1/1 Running 0 10m 10.100.203.150 worker232 <none> <none>
scheduler-resources-6d6785785-ltdn5 1/1 Running 0 10m 10.100.160.145 master231 <none> <none>
scheduler-resources-6d6785785-nn8mb 1/1 Running 0 12m 10.100.140.76 worker233 <none> <none>
scheduler-resources-6d6785785-qw822 1/1 Running 0 12m 10.100.203.148 worker232 <none> <none>
scheduler-resources-6d6785785-s86pt 1/1 Running 0 12m 10.100.140.77 worker233 <none> <none>
scheduler-resources-6d6785785-swjl4 1/1 Running 0 10m 10.100.203.151 worker232 <none> <none>
scheduler-resources-6d6785785-vrn87 1/1 Running 0 12m 10.100.203.149 worker232 <none> <none>
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
[root@master231 scheduler]#
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master231 Ready control-plane,master 3d v1.23.17
worker232 Ready <none> 3d v1.23.17
worker233 Ready <none> 3d v1.23.17
[root@master231 scheduler]#
2.2 Start evicting the Pods
[root@master231 scheduler]# kubectl drain worker233 --ignore-daemonsets
node/worker233 cordoned
WARNING: ignoring DaemonSet-managed Pods: calico-system/calico-node-d4554, calico-system/csi-node-driver-8vj74, kube-system/kube-proxy-mbdf6, metallb-system/speaker-cpt7s
evicting pod tigera-operator/tigera-operator-8d497bb9f-bq8gw
evicting pod default/scheduler-resources-6d6785785-kdqvm
evicting pod calico-apiserver/calico-apiserver-64b779ff45-4nzng
evicting pod calico-system/calico-typha-595f8c6fcb-rqnnm
evicting pod default/scheduler-resources-6d6785785-jvv29
evicting pod default/scheduler-resources-6d6785785-s86pt
evicting pod default/scheduler-resources-6d6785785-nn8mb
evicting pod metallb-system/controller-686c7db689-tz62z
pod/controller-686c7db689-tz62z evicted
pod/scheduler-resources-6d6785785-jvv29 evicted
pod/scheduler-resources-6d6785785-kdqvm evicted
pod/scheduler-resources-6d6785785-nn8mb evicted
pod/calico-typha-595f8c6fcb-rqnnm evicted
pod/calico-apiserver-64b779ff45-4nzng evicted
pod/tigera-operator-8d497bb9f-bq8gw evicted
pod/scheduler-resources-6d6785785-s86pt evicted
node/worker233 drained
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-resources-6d6785785-hmghm 0/1 Pending 0 29s <none> <none> <none> <none>
scheduler-resources-6d6785785-kxlmd 1/1 Running 0 14m 10.100.160.144 master231 <none> <none>
scheduler-resources-6d6785785-l5nns 0/1 Pending 0 29s <none> <none> <none> <none>
scheduler-resources-6d6785785-l84tb 1/1 Running 0 12m 10.100.203.150 worker232 <none> <none>
scheduler-resources-6d6785785-ltdn5 1/1 Running 0 12m 10.100.160.145 master231 <none> <none>
scheduler-resources-6d6785785-qw822 1/1 Running 0 14m 10.100.203.148 worker232 <none> <none>
scheduler-resources-6d6785785-swjl4 1/1 Running 0 12m 10.100.203.151 worker232 <none> <none>
scheduler-resources-6d6785785-vrch5 0/1 Pending 0 29s <none> <none> <none> <none>
scheduler-resources-6d6785785-vrn87 1/1 Running 0 14m 10.100.203.149 worker232 <none> <none>
scheduler-resources-6d6785785-wz9xq 0/1 Pending 0 29s <none> <none> <none> <none>
[root@master231 scheduler]#
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master231 Ready control-plane,master 3d v1.23.17
worker232 Ready <none> 3d v1.23.17
worker233 Ready,SchedulingDisabled <none> 3d v1.23.17
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
--
Taints: node.kubernetes.io/unschedulable:NoSchedule
Unschedulable: true
Lease:
[root@master231 scheduler]#
2.3 Undo the drain
[root@master231 scheduler]# kubectl uncordon worker233
node/worker233 uncordoned
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl describe nodes | grep Taints -A 2
Taints: node-role.kubernetes.io/master:NoSchedule
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
--
Taints: <none>
Unschedulable: false
Lease:
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master231 Ready control-plane,master 3d v1.23.17
worker232 Ready <none> 3d v1.23.17
worker233 Ready <none> 3d v1.23.17
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-resources-6d6785785-hmghm 1/1 Running 0 78s 10.100.140.83 worker233 <none> <none>
scheduler-resources-6d6785785-kxlmd 1/1 Running 0 15m 10.100.160.144 master231 <none> <none>
scheduler-resources-6d6785785-l5nns 1/1 Running 0 78s 10.100.140.80 worker233 <none> <none>
scheduler-resources-6d6785785-l84tb 1/1 Running 0 13m 10.100.203.150 worker232 <none> <none>
scheduler-resources-6d6785785-ltdn5 1/1 Running 0 13m 10.100.160.145 master231 <none> <none>
scheduler-resources-6d6785785-qw822 1/1 Running 0 15m 10.100.203.148 worker232 <none> <none>
scheduler-resources-6d6785785-swjl4 1/1 Running 0 13m 10.100.203.151 worker232 <none> <none>
scheduler-resources-6d6785785-vrch5 1/1 Running 0 78s 10.100.140.82 worker233 <none> <none>
scheduler-resources-6d6785785-vrn87 1/1 Running 0 15m 10.100.203.149 worker232 <none> <none>
scheduler-resources-6d6785785-wz9xq 1/1 Running 0 78s 10.100.140.81 worker233 <none> <none>
[root@master231 scheduler]#
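In day-to-day maintenance drain is usually combined with a few extra flags; the sketch below is illustrative and should be adjusted to the workloads on the node:

kubectl drain worker233 --ignore-daemonsets --delete-emptydir-data --force --grace-period=30
# --delete-emptydir-data : also evict Pods using emptyDir volumes (their data is lost)
# --force                : also delete Pods that are not managed by a controller
# --grace-period         : per-Pod termination grace period, in seconds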
Pod scheduling with nodeAffinity
nodeAffinity means node affinity; it is considerably more powerful than nodeSelector and can replace it wherever nodeSelector is used.
- 2. Reproducing nodeSelector behaviour with nodeAffinity
2.1 Environment preparation
[root@master231 scheduler]# kubectl get nodes --show-labels -l school | grep school
master231 Ready control-plane,master 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master231,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=,school=beijing
worker232 Ready <none> 3d7h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker232,kubernetes.io/os=linux,school=shanghai
worker233 Ready <none> 4h12m v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker233,kubernetes.io/os=linux,school=shenzhen
[root@master231 scheduler]#
2.2 Prepare the resource manifest
[root@master231 scheduler]# cat 04-deploy-nodeAffinity.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scheduler-affinity
spec:
  replicas: 5
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      affinity:
        nodeAffinity:
          # hard requirement: only nodes whose 'school' label value is in the list qualify
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: school
                values:
                - shanghai
                operator: In
      containers:
      - image: harbor250.violet.com/violet-xiuxian/apps:v1
        name: c1
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl apply -f 04-deploy-nodeAffinity.yaml
deployment.apps/scheduler-affinity created
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-affinity-6c67cdff9c-52mrg 1/1 Running 0 8s 10.100.203.164 worker232 <none> <none>
scheduler-affinity-6c67cdff9c-f24fs 1/1 Running 0 8s 10.100.203.160 worker232 <none> <none>
scheduler-affinity-6c67cdff9c-m5xdr 1/1 Running 0 8s 10.100.203.163 worker232 <none> <none>
scheduler-affinity-6c67cdff9c-n4w9w 1/1 Running 0 8s 10.100.203.162 worker232 <none> <none>
scheduler-affinity-6c67cdff9c-wsn7s 1/1 Running 0 8s 10.100.203.161 worker232 <none> <none>
[root@master231 scheduler]#
2.3 Doing with nodeAffinity what nodeSelector cannot (matching several label values)
[root@master231 scheduler]# cat 04-deploy-nodeAffinity.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scheduler-affinity
spec:
  replicas: 5
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: school
                values:
                # - shanghai
                - beijing
                - shenzhen
                operator: In
      containers:
      - image: harbor250.violet.com/violet-xiuxian/apps:v1
        name: c1
[root@master231 scheduler]#
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl apply -f 04-deploy-nodeAffinity.yaml
deployment.apps/scheduler-affinity created
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-affinity-6b5747cfd8-d9dkd 1/1 Running 0 5s 10.100.140.84 worker233 <none> <none>
scheduler-affinity-6b5747cfd8-kmk4d 1/1 Running 0 5s 10.100.140.82 worker233 <none> <none>
scheduler-affinity-6b5747cfd8-m5777 1/1 Running 0 5s 10.100.160.150 master231 <none> <none>
scheduler-affinity-6b5747cfd8-skl7r 1/1 Running 0 5s 10.100.160.151 master231 <none> <none>
scheduler-affinity-6b5747cfd8-t9bf6 1/1 Running 0 5s 10.100.140.83 worker233 <none> <none>
[root@master231 scheduler]#
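Besides the hard requirement used above, nodeAffinity also has a soft form, preferredDuringSchedulingIgnoredDuringExecution, where each term carries a weight and the scheduler merely prefers matching nodes instead of requiring them. A fragment that could replace the affinity block in the manifest above (the weight of 80 is arbitrary):

      affinity:
        nodeAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 80
            preference:
              matchExpressions:
              - key: school
                operator: In
                values:
                - shanghai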
Pod scheduling with podAffinity
podAffinity looks at the topology domain where matching Pods are already running and schedules subsequent Pods into that same domain.
To make this easier to picture, you can temporarily think of a topology domain as a machine room.
2.1 Environment preparation
[root@master231 scheduler]# kubectl get nodes --show-labels | grep school
master231 Ready control-plane,master 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master231,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=,school=beijing
worker232 Ready <none> 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker232,kubernetes.io/os=linux,school=shanghai
worker233 Ready <none> 4h40m v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker233,kubernetes.io/os=linux,school=shenzhen
[root@master231 scheduler]#
2.2 Write the resource manifest
[root@master231 scheduler]# cat 05-deploy-podAffinity.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scheduler-pod-affinity
spec:
  replicas: 5
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            # co-locate with Pods matching the labelSelector, within the same 'school' topology domain
          - topologyKey: school
            labelSelector:
              matchLabels:
                apps: xiuxian
      containers:
      - image: harbor250.violet.com/violet-xiuxian/apps:v1
        name: c1
[root@master231 scheduler]#
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl apply -f 05-deploy-podAffinity.yaml
deployment.apps/scheduler-pod-affinity created
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-pod-affinity-799478ffcd-64pzl 1/1 Running 0 23s 10.100.140.89 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-8d8gd 1/1 Running 0 23s 10.100.140.85 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-mjc8l 1/1 Running 0 23s 10.100.140.86 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-s59zb 1/1 Running 0 23s 10.100.140.88 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-wgsdj 1/1 Running 0 23s 10.100.140.87 worker233 <none> <none>
[root@master231 scheduler]#
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl scale deployment scheduler-pod-affinity --replicas=10
deployment.apps/scheduler-pod-affinity scaled
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-pod-affinity-799478ffcd-4l5dv 1/1 Running 0 4s 10.100.140.91 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-4l8xx 1/1 Running 0 4s 10.100.140.93 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-64pzl 1/1 Running 0 84s 10.100.140.89 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-7xf8w 1/1 Running 0 4s 10.100.140.94 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-8d8gd 1/1 Running 0 84s 10.100.140.85 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-hdrvs 1/1 Running 0 4s 10.100.140.90 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-kq8hz 1/1 Running 0 4s 10.100.140.92 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-mjc8l 1/1 Running 0 84s 10.100.140.86 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-s59zb 1/1 Running 0 84s 10.100.140.88 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-wgsdj 1/1 Running 0 84s 10.100.140.87 worker233 <none> <none>
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-pod-affinity-799478ffcd-4l5dv 1/1 Running 0 10s 10.100.140.91 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-4l8xx 1/1 Running 0 10s 10.100.140.93 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-64pzl 1/1 Running 0 90s 10.100.140.89 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-7xf8w 1/1 Running 0 10s 10.100.140.94 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-8d8gd 1/1 Running 0 90s 10.100.140.85 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-hdrvs 1/1 Running 0 10s 10.100.140.90 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-kq8hz 1/1 Running 0 10s 10.100.140.92 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-mjc8l 1/1 Running 0 90s 10.100.140.86 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-s59zb 1/1 Running 0 90s 10.100.140.88 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-wgsdj 1/1 Running 0 90s 10.100.140.87 worker233 <none> <none>
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes --show-labels |grep school
master231 Ready control-plane,master 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master231,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=,school=beijing
worker232 Ready <none> 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker232,kubernetes.io/os=linux,school=shanghai
worker233 Ready <none> 4h46m v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker233,kubernetes.io/os=linux,school=shenzhen
[root@master231 scheduler]#
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl label nodes master231 school=shenzhen --overwrite
node/master231 labeled
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes --show-labels |grep school
master231 Ready control-plane,master 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master231,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=,school=shenzhen
worker232 Ready <none> 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker232,kubernetes.io/os=linux,school=shanghai
worker233 Ready <none> 4h46m v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker233,kubernetes.io/os=linux,school=shenzhen
[root@master231 scheduler]#
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl scale deployment scheduler-pod-affinity --replicas=15
deployment.apps/scheduler-pod-affinity scaled
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes --show-labels |grep school
master231 Ready control-plane,master 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master231,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=,school=shenzhen
worker232 Ready <none> 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker232,kubernetes.io/os=linux,school=shanghai
worker233 Ready <none> 4h47m v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker233,kubernetes.io/os=linux,school=shenzhen
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-pod-affinity-799478ffcd-4l5dv 1/1 Running 0 60s 10.100.140.91 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-4l8xx 1/1 Running 0 60s 10.100.140.93 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-64pzl 1/1 Running 0 2m20s 10.100.140.89 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-79pfp 1/1 Running 0 6s 10.100.160.155 master231 <none> <none>
scheduler-pod-affinity-799478ffcd-7xf8w 1/1 Running 0 60s 10.100.140.94 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-8d8gd 1/1 Running 0 2m20s 10.100.140.85 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-hdrvs 1/1 Running 0 60s 10.100.140.90 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-j8zhc 1/1 Running 0 6s 10.100.160.156 master231 <none> <none>
scheduler-pod-affinity-799478ffcd-kq8hz 1/1 Running 0 60s 10.100.140.92 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-mjc8l 1/1 Running 0 2m20s 10.100.140.86 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-pztpw 1/1 Running 0 6s 10.100.160.154 master231 <none> <none>
scheduler-pod-affinity-799478ffcd-s59zb 1/1 Running 0 2m20s 10.100.140.88 worker233 <none> <none>
scheduler-pod-affinity-799478ffcd-tfzqn 1/1 Running 0 6s 10.100.160.152 master231 <none> <none>
scheduler-pod-affinity-799478ffcd-vm8kc 1/1 Running 0 6s 10.100.160.153 master231 <none> <none>
scheduler-pod-affinity-799478ffcd-wgsdj 1/1 Running 0 2m20s 10.100.140.87 worker233 <none> <none>
[root@master231 scheduler]#
Pod scheduling with podAntiAffinity
podAntiAffinity is the opposite of podAffinity: once a matching Pod has been scheduled into a topology domain, subsequent Pods will not be scheduled into that domain.
2.1 Environment preparation
[root@master231 scheduler]# kubectl get nodes --show-labels |grep school
master231 Ready control-plane,master 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master231,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=,school=shenzhen
worker232 Ready <none> 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker232,kubernetes.io/os=linux,school=shanghai
worker233 Ready <none> 4h57m v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker233,kubernetes.io/os=linux,school=shenzhen
[root@master231 scheduler]#
2.2 Write the resource manifest
[root@master231 scheduler]# cat 06-deploy-podAntiAffinity.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: scheduler-pod-anti-affinity
spec:
  replicas: 5
  selector:
    matchLabels:
      apps: xiuxian
  template:
    metadata:
      labels:
        apps: xiuxian
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            # avoid 'school' topology domains that already run a Pod matching the labelSelector
          - topologyKey: school
            labelSelector:
              matchLabels:
                apps: xiuxian
      containers:
      - image: harbor250.violet.com/violet-xiuxian/apps:v1
        name: c1
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl apply -f 06-deploy-podAntiAffinity.yaml
deployment.apps/scheduler-pod-anti-affinity created
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-pod-anti-affinity-5845b69566-95tz7 1/1 Running 0 3s 10.100.140.95 worker233 <none> <none>
scheduler-pod-anti-affinity-5845b69566-ln7nj 1/1 Running 0 3s 10.100.203.166 worker232 <none> <none>
scheduler-pod-anti-affinity-5845b69566-nzw84 0/1 Pending 0 3s <none> <none> <none> <none>
scheduler-pod-anti-affinity-5845b69566-vnnb5 0/1 Pending 0 3s <none> <none> <none> <none>
scheduler-pod-anti-affinity-5845b69566-wt49q 0/1 Pending 0 3s <none> <none> <none> <none>
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes --show-labels |grep school
master231 Ready control-plane,master 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master231,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=,school=shenzhen
worker232 Ready <none> 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker232,kubernetes.io/os=linux,school=shanghai
worker233 Ready <none> 4h59m v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker233,kubernetes.io/os=linux,school=shenzhen
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl label nodes master231 school=beijing --overwrite
node/master231 unlabeled
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get nodes --show-labels |grep school
master231 Ready control-plane,master 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master231,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=,school=beijing
worker232 Ready <none> 3d8h v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker232,kubernetes.io/os=linux,school=shanghai
worker233 Ready <none> 5h1m v1.23.17 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker233,kubernetes.io/os=linux,school=shenzhen
[root@master231 scheduler]#
[root@master231 scheduler]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
scheduler-pod-anti-affinity-5845b69566-95tz7 1/1 Running 0 2m49s 10.100.140.95 worker233 <none> <none>
scheduler-pod-anti-affinity-5845b69566-ln7nj 1/1 Running 0 2m49s 10.100.203.166 worker232 <none> <none>
scheduler-pod-anti-affinity-5845b69566-nzw84 0/1 Pending 0 2m49s <none> <none> <none> <none>
scheduler-pod-anti-affinity-5845b69566-vnnb5 1/1 Running 0 2m49s 10.100.160.157 master231 <none> <none>
scheduler-pod-anti-affinity-5845b69566-wt49q 0/1 Pending 0 2m49s <none> <none> <none> <none>
[root@master231 scheduler]#
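A very common variation of podAntiAffinity uses the built-in node label kubernetes.io/hostname as the topologyKey, so every node is its own topology domain and at most one matching replica lands on each node. A sketch of that fragment, reusing the labels from the manifest above:

      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                apps: xiuxian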