环境变量
[root@worker232 ~]# wget http://192.168.16.253/Resources/Docker/images/WordPress/violet-mysql-v8.0.36-oracle.tar.gz
[root@worker232 ~]# docker load -i violet-mysql-v8.0.36-oracle.tar.gz
[root@master231 pods]# cat 03-pods-mysql-env.yaml
apiVersion: v1
kind: Pod
metadata:
name: violet-db
spec:
nodeName: worker232
containers:
- image: mysql:8.0.36-oracle
name: db
# 向容器传递环境变量
env:
# 指定环境变量的名称
- name: MYSQL_DATABASE
# 指定环境变量的值
value: wordpress
- name: MYSQL_ALLOW_EMPTY_PASSWORD
value: "yes"
- name: MYSQL_USER
value: admin
- name: MYSQL_PASSWORD
value: lax
# 向容器传参
args:
- --character-set-server=utf8
- --collation-server=utf8_bin
- --default-authentication-plugin=mysql_native_password
[root@master231 pods]#
[root@master231 pods]# kubectl apply -f 03-pods-mysql-env.yaml
pod/violet-db created
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-db 1/1 Running 0 3s 10.100.203.140 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]# kubectl exec -it violet-db mysql
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 8
Server version: 8.0.36 MySQL Community Server - GPL
Copyright (c) 2000, 2024, Oracle and/or its affiliates.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> SHOW DATABASES;
+--------------------+
| Database |
+--------------------+
| information_schema |
| mysql |
| performance_schema |
| sys |
| wordpress |
+--------------------+
5 rows in set (0.00 sec)
mysql>
mysql> SELECT user,host,plugin FROM mysql.user;
+------------------+-----------+-----------------------+
| user | host | plugin |
+------------------+-----------+-----------------------+
| admin | % | mysql_native_password |
| root | % | mysql_native_password |
| mysql.infoschema | localhost | caching_sha2_password |
| mysql.session | localhost | caching_sha2_password |
| mysql.sys | localhost | caching_sha2_password |
| root | localhost | mysql_native_password |
+------------------+-----------+-----------------------+
6 rows in set (0.00 sec)
mysql>
hostNetwork之WordPress案例
[root@worker233 ~]# wget http://192.168.16.253/Resources/Docker/images/WordPress/violet-wordpress-v6.7.1-php8.1-apache.tar.gz
[root@master231 pods]# cat 04-pods-wodpress-hostNetwork.yaml
apiVersion: v1
kind: Pod
metadata:
name: violet-wp
spec:
  # 表示不为Pod分配网络名称空间,而是和宿主机共用相同的网络名称空间
hostNetwork: true
nodeName: worker233
containers:
- image: wordpress:6.7.1-php8.1-apache
name: wp
env:
- name: WORDPRESS_DB_HOST
value: 10.100.203.140
- name: WORDPRESS_DB_NAME
value: wordpress
- name: WORDPRESS_DB_USER
value: admin
- name: WORDPRESS_DB_PASSWORD
value: lax
[root@master231 pods]#
[root@master231 pods]# kubectl create -f 04-pods-wodpress-hostNetwork.yaml
pod/violet-wp created
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-db 1/1 Running 0 15m 10.100.203.140 worker232 <none> <none>
violet-wp 1/1 Running 0 4s 10.0.0.233 worker233 <none> <none>
[root@master231 pods]#
可以通过节点ip访问
重启策略
Pod重启策略表示对所有容器生效,当容器退出时是否需要重新创建容器。
官方有效的重启策略有三种:
Always:
当容器退出时,始终重新创建新的容器。默认就是Always。
OnFailure:
当容器异常退出时,才会重新创建新的容器。
Never:
当容器退出时,始终不重新创建容器。
[root@master231 pods]# cat 05-pods-restartPolicy.yaml
apiVersion: v1
kind: Pod
metadata:
name: violet-restartpolicy
spec:
# 指定重启策略
restartPolicy: Always
containers:
- image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
name: xiuxian
command:
- sleep
- "10"
[root@master231 pods]#
[root@master231 pods]# kubectl create -f 05-pods-restartPolicy.yaml
pod/violet-restartpolicy created
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 1/1 Running 1 (10s ago) 21s 10.100.203.141 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 0/1 Completed 1 (23s ago) 34s 10.100.203.141 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 1/1 Running 2 (18s ago) 39s 10.100.203.141 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]# cat 05-pods-restartPolicy.yaml
apiVersion: v1
kind: Pod
metadata:
name: violet-restartpolicy
spec:
# 指定重启策略
# restartPolicy: Always
restartPolicy: Never
containers:
- image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
name: xiuxian
command:
- sleep
- "10"
[root@master231 pods]#
[root@master231 pods]# kubectl create -f 05-pods-restartPolicy.yaml
pod/violet-restartpolicy created
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 1/1 Running 0 4s 10.100.203.142 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 1/1 Running 0 8s 10.100.203.142 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 1/1 Running 0 11s 10.100.203.142 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 0/1 Completed 0 18s 10.100.203.142 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 0/1 Completed 0 37s 10.100.203.142 worker232 <none> <none>
[root@master231 pods]#
4.1 编写资源清单
[root@master231 pods]# cat 05-pods-restartPolicy.yaml
apiVersion: v1
kind: Pod
metadata:
name: violet-restartpolicy
spec:
# 指定重启策略
# restartPolicy: Always
# restartPolicy: Never
restartPolicy: OnFailure
containers:
- image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
name: xiuxian
command:
- sleep
- "30"
[root@master231 pods]#
4.2 杀死容器测试
[root@worker232 ~]# docker ps -a | grep violet-restartpolicy
916ca13cef1b f28fd43be4ad "sleep 30" 15 seconds ago Up 15 seconds k8s_xiuxian_violet-restartpolicy_default_3ffea830-7326-4734-b32c-4afe136969a7_0
fa37b9d0124d registry.aliyuncs.com/google_containers/pause:3.6 "/pause" 16 seconds ago Up 15 seconds k8s_POD_violet-restartpolicy_default_3ffea830-7326-4734-b32c-4afe136969a7_0
8beab96730e8 f28fd43be4ad "sleep 10" 2 minutes ago Exited (0) About a minute ago k8s_xiuxian_violet-restartpolicy_default_5302ea91-a2cc-4064-aa13-d2d172ece52e_0
f3e1f164e4f5 registry.aliyuncs.com/google_containers/pause:3.6 "/pause" 2 minutes ago Exited (0) About a minute ago k8s_POD_violet-restartpolicy_default_5302ea91-a2cc-4064-aa13-d2d172ece52e_0
[root@worker232 ~]#
[root@worker232 ~]# docker kill 916ca13cef1b
916ca13cef1b
[root@worker232 ~]#
[root@worker232 ~]# docker ps -a | grep violet-restartpolicy
e187244ef978 f28fd43be4ad "sleep 30" 2 seconds ago Up 1 second k8s_xiuxian_violet-restartpolicy_default_3ffea830-7326-4734-b32c-4afe136969a7_1
916ca13cef1b f28fd43be4ad "sleep 30" 27 seconds ago Exited (137) 2 seconds ago k8s_xiuxian_violet-restartpolicy_default_3ffea830-7326-4734-b32c-4afe136969a7_0
fa37b9d0124d registry.aliyuncs.com/google_containers/pause:3.6 "/pause" 28 seconds ago Up 27 seconds k8s_POD_violet-restartpolicy_default_3ffea830-7326-4734-b32c-4afe136969a7_0
[root@worker232 ~]#
4.4 异常退出会重启
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 1/1 Running 0 6s 10.100.203.143 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 1/1 Running 1 (10s ago) 35s 10.100.203.143 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 0/1 Completed 1 88s 10.100.203.143 worker232 <none> <none>
[root@master231 pods]#
k8s对接harbor公有仓库
[root@harbor250.violet.com ~]# cd /usr/local/harbor/
[root@harbor250.violet.com harbor]#
[root@harbor250.violet.com harbor]# ll
total 636540
drwxr-xr-x 4 root root 4096 Mar 25 10:46 ./
drwxr-xr-x 11 root root 4096 Mar 25 10:30 ../
drwxr-xr-x 5 root root 4096 Mar 25 10:31 certs/
drwxr-xr-x 3 root root 4096 Mar 25 10:46 common/
-rw-r--r-- 1 root root 3646 Jan 16 22:10 common.sh
-rw-r--r-- 1 root root 5993 Mar 25 10:46 docker-compose.yml
-rw-r--r-- 1 root root 651727378 Jan 16 22:11 harbor.v2.12.2.tar.gz
-rw-r--r-- 1 root root 14548 Mar 25 10:43 harbor.yml
-rw-r--r-- 1 root root 14288 Jan 16 22:10 harbor.yml.tmpl
-rwxr-xr-x 1 root root 1975 Jan 16 22:10 install.sh*
-rw-r--r-- 1 root root 11347 Jan 16 22:10 LICENSE
-rwxr-xr-x 1 root root 2211 Jan 16 22:10 prepare*
[root@harbor250.violet.com harbor]# docker-compose down -t 0
[+] Running 10/10
✔ Container harbor-jobservice Removed 0.0s
✔ Container nginx Removed 0.0s
✔ Container registryctl Removed 0.0s
✔ Container harbor-core Removed 0.0s
✔ Container harbor-portal Removed 0.0s
✔ Container harbor-db Removed 0.1s
✔ Container registry Removed 0.0s
✔ Container redis Removed 0.0s
✔ Container harbor-log Removed 0.2s
✔ Network harbor_harbor Removed 0.2s
[root@harbor250.violet.com harbor]#
[root@harbor250.violet.com harbor]# docker-compose up -d
[+] Building 0.0s (0/0) docker:default
[+] Running 10/10
✔ Network harbor_harbor Created 0.1s
✔ Container harbor-log Started 0.0s
✔ Container harbor-db Started 0.0s
✔ Container redis Started 0.0s
✔ Container harbor-portal Started 0.1s
✔ Container registryctl Started 0.0s
✔ Container registry Started 0.1s
✔ Container harbor-core Started 0.0s
✔ Container nginx Started 0.0s
✔ Container harbor-jobservice Started 0.0s
[root@harbor250.violet.com harbor]#
推荐项目名称为: "violet-xiuxian"
[root@worker232 ~]# vim /lib/systemd/system/docker.service
[root@worker232 ~]#
[root@worker232 ~]# systemctl daemon-reload
[root@worker232 ~]#
[root@worker232 ~]# systemctl cat docker
# /lib/systemd/system/docker.service
[Unit]
Description=violet linux Docker Engine
Documentation=https://docs.docker.com,https://www.violet.com
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/bin/dockerd
# 配置docker代理
#Environment="HTTP_PROXY=http://10.0.0.1:7890"
#Environment="HTTPS_PROXY=http://10.0.0.1:7890"
[Install]
WantedBy=multi-user.target
[root@worker232 ~]#
[root@worker232 ~]# systemctl restart docker
[root@worker232 ~]#
4.所有节点拷贝证书文件【master231和worker233步骤相同,自行完成】
[root@worker232 ~]# echo 10.0.0.250 harbor250.violet.com >> /etc/hosts
[root@worker232 ~]# mkdir -pv /etc/docker/certs.d/harbor250.violet.com/
mkdir: created directory '/etc/docker/certs.d'
mkdir: created directory '/etc/docker/certs.d/harbor250.violet.com/'
[root@worker232 ~]#
[root@worker232 ~]# scp harbor250.violet.com:/usr/local/harbor/certs/docker-client/* /etc/docker/certs.d/harbor250.violet.com/
[root@worker232 ~]#
[root@worker232 ~]# ll /etc/docker/certs.d/harbor250.violet.com/
total 20
drwxr-xr-x 2 root root 4096 Apr 8 10:30 ./
drwxr-xr-x 3 root root 4096 Apr 8 10:30 ../
-rw-r--r-- 1 root root 2049 Apr 8 10:30 ca.crt
-rw-r--r-- 1 root root 2155 Apr 8 10:30 harbor250.violet.com.cert
-rw------- 1 root root 3268 Apr 8 10:30 harbor250.violet.com.key
[root@worker232 ~]#
[root@worker233 ~]# ll /etc/docker/certs.d/harbor250.violet.com/
total 20
drwxr-xr-x 2 root root 4096 Apr 8 10:33 ./
drwxr-xr-x 3 root root 4096 Apr 8 10:33 ../
-rw-r--r-- 1 root root 2049 Apr 8 10:33 ca.crt
-rw-r--r-- 1 root root 2155 Apr 8 10:33 harbor250.violet.com.cert
-rw------- 1 root root 3268 Apr 8 10:33 harbor250.violet.com.key
[root@worker233 ~]#
[root@master231 pods]# ll /etc/docker/certs.d/harbor250.violet.com/
total 20
drwxr-xr-x 2 root root 4096 Apr 8 10:33 ./
drwxr-xr-x 3 root root 4096 Apr 8 10:33 ../
-rw-r--r-- 1 root root 2049 Apr 8 10:33 ca.crt
-rw-r--r-- 1 root root 2155 Apr 8 10:33 harbor250.violet.com.cert
-rw------- 1 root root 3268 Apr 8 10:33 harbor250.violet.com.key
[root@master231 pods]#
[root@worker232 ~]# docker login -u admin -p 1 harbor250.violet.com
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
[root@worker232 ~]#
[root@worker232 ~]# docker tag registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1 harbor250.violet.com/violet-xiuxian/apps:v1
[root@worker232 ~]#
[root@worker232 ~]# docker push harbor250.violet.com/violet-xiuxian/apps:v1
The push refers to repository [harbor250.violet.com/violet-xiuxian/apps]
8e2be8913e57: Pushed
9d5b000ce7c7: Pushed
b8dbe22b95f7: Pushed
c39c1c35e3e8: Pushed
5f66747c8a72: Pushed
15d7cdc64789: Pushed
7fcb75871b21: Pushed
v1: digest: sha256:3bee216f250cfd2dbda1744d6849e27118845b8f4d55dda3ca3c6c1227cc2e5c size: 1778
[root@worker232 ~]#
[root@master231 pods]# cat 06-pods-harbor-xiuxian.yaml
apiVersion: v1
kind: Pod
metadata:
name: xiuxian-harbor
spec:
containers:
- image: harbor250.violet.com/violet-xiuxian/apps:v1
name: xiuxian
[root@master231 pods]#
[root@master231 pods]# kubectl create -f 06-pods-harbor-xiuxian.yaml
pod/xiuxian-harbor created
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
violet-restartpolicy 0/1 Completed 1 32m 10.100.203.143 worker232 <none> <none>
xiuxian-harbor 1/1 Running 0 5s 10.100.203.146 worker232 <none> <none>
[root@master231 pods]#
[root@master231 pods]#
[root@master231 pods]# curl 10.100.203.146
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>lax apps v1</title>
<style>
div img {
width: 900px;
height: 600px;
margin: 0;
}
</style>
</head>
<body>
<h1 style="color: green">凡人修仙传 v1 </h1>
<div>
<img src="1.jpg">
<div>
</body>
</html>
[root@master231 pods]#
[root@master231 pods]#
[root@master231 pods]# kubectl delete pods --all
pod "violet-restartpolicy" deleted
pod "xiuxian-harbor" deleted
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
No resources found in default namespace.
[root@master231 pods]#
镜像下载策略
所谓的镜像下载策略指的是在启动容器之前,镜像如何获取?
官方有三种拉取策略:
Always:
如果本地没有镜像,则始终会去远程仓库拉取镜像。
如果本地有镜像,则会对比本地的镜像摘要信息和远程仓库的摘要信息,若相同,则使用本地缓存镜像,若不同,则拉取镜像。
Never:
如果本地没有镜像,则报错,不会拉取镜像。
如果本地有镜像,则会尝试启动。
IfNotPresent:
如果本地没有镜像,则会拉取镜像。
如果本地有镜像,则会尝试启动。
默认策略说明:
默认的拉取策略根据用户镜像的标签而定,当镜像的标签为"latest"则默认策略为Always,当镜像的标签非"latest"则默认的拉取策略为"IfNotPresent"。
2.1 编写Dockerfile
[root@worker232 ~]# cat Dockerfile
FROM harbor250.violet.com/violet-xiuxian/apps:v1
MAINTAINER JasonYin
LABEL school=violet \
class=linux96
RUN mkdir /violet-xixi && \
touch /violet-xixi/haha.log
CMD ["sleep","3600"]
[root@worker232 ~]#
[root@worker232 ~]# docker build -t harbor250.violet.com/violet-test/demo:v1 .
[root@worker232 ~]#
[root@worker232 ~]# docker push harbor250.violet.com/violet-test/demo:v1
[root@worker232 ~]#
2.2 验证Never策略
[root@master231 pods]# cat 07-pods-imagePullPolicy.yaml
apiVersion: v1
kind: Pod
metadata:
name: xiuxian-imagepullpolicy
spec:
nodeName: worker233
containers:
- image: harbor250.violet.com/violet-test/demo:v1
name: xiuxian
imagePullPolicy: Never
[root@master231 pods]#
[root@master231 pods]# kubectl create -f 07-pods-imagePullPolicy.yaml
pod/xiuxian-imagepullpolicy created
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
xiuxian-imagepullpolicy 0/1 ErrImageNeverPull 0 3s 10.100.140.77 worker233 <none> <none>
[root@master231 pods]#
2.3 验证IfNotPresent
[root@master231 pods]# cat 07-pods-imagePullPolicy.yaml
apiVersion: v1
kind: Pod
metadata:
name: xiuxian-imagepullpolicy
spec:
nodeName: worker233
containers:
- image: harbor250.violet.com/violet-test/demo:v1
name: xiuxian
# imagePullPolicy: Never
imagePullPolicy: IfNotPresent
[root@master231 pods]#
2.4 再次打镜像测试
[root@worker232 ~]# cat Dockerfile
FROM harbor250.violet.com/violet-xiuxian/apps:v1
MAINTAINER JasonYin
LABEL school=violet \
class=linux96
RUN mkdir /violet-xixi && \
touch /violet-xixi/haha.log
RUN mkdir /violet-haHA && \
touch /violet-haHA/xixi.log
CMD ["sleep","7200"]
[root@worker232 ~]#
[root@worker232 ~]#
[root@worker232 ~]# docker build -t harbor250.violet.com/violet-test/demo:v1 .
[root@worker232 ~]#
[root@worker232 ~]# docker push harbor250.violet.com/violet-test/demo:v1
[root@worker232 ~]#
2.5 验证Always
[root@master231 pods]#
[root@master231 pods]# cat 07-pods-imagePullPolicy.yaml
apiVersion: v1
kind: Pod
metadata:
name: xiuxian-imagepullpolicy
spec:
nodeName: worker233
containers:
- image: harbor250.violet.com/violet-test/demo:v1
name: xiuxian
# imagePullPolicy: Never
# imagePullPolicy: IfNotPresent
imagePullPolicy: Always
[root@master231 pods]#
[root@master231 pods]# kubectl apply -f 07-pods-imagePullPolicy.yaml
pod/xiuxian-imagepullpolicy created
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
xiuxian-imagepullpolicy 1/1 Running 0 5s 10.100.140.81 worker233 <none> <none>
[root@master231 pods]#
[root@master231 pods]# kubectl exec xiuxian-imagepullpolicy -- ls -l /violet-haHA
total 0
-rw-r--r-- 1 root root 0 Apr 8 03:12 xixi.log
[root@master231 pods]#
[root@worker233 ~]# docker image ls harbor250.violet.com/violet-test/demo:v1
REPOSITORY TAG IMAGE ID CREATED SIZE
harbor250.violet.com/violet-test/demo v1 4aa310d2d4a6 10 minutes ago 23MB
[root@worker233 ~]#
[root@worker233 ~]# docker image ls harbor250.violet.com/violet-test/demo:v1
REPOSITORY TAG IMAGE ID CREATED SIZE
harbor250.violet.com/violet-test/demo v1 5eeea2c6ece2 3 minutes ago 23MB
[root@worker233 ~]#
资源限制
资源限制目的是为了限制容器的资源使用上限或者期望资源。
requests:
表示期望调度节点拥有的空闲资源。
但实际调度后并不会立刻使用这些资源。
limits:
表示使用资源的上限。
温馨提示:
如果不定义requests字段,则默认和limits相同。
如果不定义resources字段,则默认和宿主机的资源相同。
[root@master231 pods]# cat 08-pods-resources.yaml
apiVersion: v1
kind: Pod
metadata:
name: xiuxian-resources
spec:
nodeName: worker233
containers:
- image: jasonyin2020/violet-linux-tools:v0.1
name: c1
# 配置资源限制,如果不定义,则默认是宿主机的所有资源。
resources:
# 期望资源,如果不定义,则默认和limits相同。
requests:
cpu: 0.5
memory: 1Gi
# 使用资源的上限
limits:
cpu: 1.5
memory: 2Gi
command:
- tail
- -f
- /etc/hosts
imagePullPolicy: IfNotPresent
[root@master231 pods]#
[root@master231 pods]# kubectl apply -f 08-pods-resources.yaml
pod/xiuxian-resources created
[root@master231 pods]#
[root@master231 pods]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
xiuxian-resources 1/1 Running 0 3s 10.100.140.87 worker233 <none> <none>
[root@master231 pods]#
[root@master231 pods]# kubectl exec -it xiuxian-resources -- sh
/usr/local/stress #
/usr/local/stress # stress -m 10 --vm-bytes 200000000 --vm-keep --verbose
[root@worker233 ~]# docker ps -a | grep xiuxian-resources
...
[root@worker233 ~]# docker stats <容器的ID>
Continue Reading