名称空间
所谓的名称空间就是k8s用来隔离资源的。
k8s一切皆资源,有些资源是支持名称空间的,我们称之为局部资源。有些不支持名称空间的我们称之为全局资源。
如何查看资源是否支持名称空间呢?可以通过以下命令的"NAMESPACED"字段查看。如果为true表示支持名称空间,如果为false表示不支持名称空间。
[root@master231 manifests]# kubectl api-resources
NAME SHORTNAMES APIVERSION NAMESPACED KIND
bindings v1 true Binding
componentstatuses cs v1 false ComponentStatus
configmaps cm v1 true ConfigMap
endpoints ep v1 true Endpoints
events ev v1 true Event
limitranges limits v1 true LimitRange
namespaces ns v1 false Namespace
nodes no v1 false Node
persistentvolumeclaims pvc v1 true PersistentVolumeClaim
persistentvolumes pv v1 false PersistentVolume
pods po v1 true Pod
podtemplates v1 true PodTemplate
replicationcontrollers rc v1 true ReplicationController
...
2.1 查看现有的名称空间
[root@master231 manifests]# kubectl get ns
NAME STATUS AGE
calico-apiserver Active 46h
calico-system Active 47h
default Active 47h
kube-node-lease Active 47h
kube-public Active 47h
kube-system Active 47h
tigera-operator Active 47h
[root@master231 manifests]#
2.2 响应式创建名称空间
[root@master231 manifests]# kubectl create namespace violet
namespace/violet created
[root@master231 manifests]#
[root@master231 manifests]# kubectl get ns
NAME STATUS AGE
calico-apiserver Active 46h
calico-system Active 47h
default Active 47h
kube-node-lease Active 47h
kube-public Active 47h
kube-system Active 47h
violet Active 1s
tigera-operator Active 47h
[root@master231 manifests]#
[root@master231 manifests]# kubectl get ns violet
NAME STATUS AGE
violet Active 7s
[root@master231 manifests]#
2.3 删除名称空间
[root@master231 manifests]# kubectl delete ns violet
namespace "violet" deleted
[root@master231 manifests]#
[root@master231 manifests]# kubectl get ns
NAME STATUS AGE
calico-apiserver Active 46h
calico-system Active 47h
default Active 47h
kube-node-lease Active 47h
kube-public Active 47h
kube-system Active 47h
tigera-operator Active 47h
[root@master231 manifests]#
[root@master231 manifests]# kubectl get ns violet
Error from server (NotFound): namespaces "violet" not found
[root@master231 manifests]#
温馨提示:
删除名称空间意味着删除该名称空间下的所有资源,生产环境一定要谨慎使用!!!
2.4 声明式创建名称空间
[root@master231 namespace]# kubectl create ns violet -o yaml --dry-run=client > 01-ns.yaml
[root@master231 namespace]#
[root@master231 namespace]# vim 01-ns.yaml
[root@master231 namespace]#
[root@master231 namespace]# cat 01-ns.yaml
apiVersion: v1
kind: Namespace
metadata:
name: violet
[root@master231 namespace]#
[root@master231 namespace]# kubectl apply -f 01-ns.yaml
namespace/violet created
[root@master231 namespace]#
[root@master231 namespace]# kubectl get -f 01-ns.yaml
NAME STATUS AGE
violet Active 3s
[root@master231 namespace]#
[root@master231 namespace]# kubectl get ns
NAME STATUS AGE
calico-apiserver Active 47h
calico-system Active 47h
default Active 47h
kube-node-lease Active 47h
kube-public Active 47h
kube-system Active 47h
violet Active 8s
tigera-operator Active 47h
[root@master231 namespace]#
2.5 查看指定名称空间的资源
[root@master231 namespace]# kubectl get pods -n calico-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-76d5c7cfc-89z7j 1/1 Running 1 (46h ago) 47h
calico-node-4cvnj 1/1 Running 2 (24h ago) 47h
calico-node-d4554 1/1 Running 1 (46h ago) 47h
calico-node-qbxmn 1/1 Running 1 (46h ago) 47h
calico-typha-595f8c6fcb-9pm4b 1/1 Running 2 (46h ago) 47h
calico-typha-595f8c6fcb-bhdw6 1/1 Running 3 (24h ago) 47h
csi-node-driver-7z4hj 2/2 Running 4 (24h ago) 47h
csi-node-driver-8vj74 2/2 Running 2 (46h ago) 47h
csi-node-driver-m66z9 2/2 Running 2 (46h ago) 47h
[root@master231 namespace]#
[root@master231 namespace]# kubectl get pods
NAME READY STATUS RESTARTS AGE
hello-29069446-bwkvl 0/1 Completed 0 2m56s
hello-29069447-qb588 0/1 Completed 0 116s
hello-29069448-jrl7t 0/1 Completed 0 56s
[root@master231 namespace]#
[root@master231 namespace]# kubectl get pods -n default
NAME READY STATUS RESTARTS AGE
hello-29069446-bwkvl 0/1 Completed 0 3m1s
hello-29069447-qb588 0/1 Completed 0 2m1s
hello-29069448-jrl7t 0/1 Completed 0 61s
hello-29069449-v6rww 0/1 ContainerCreating 0 1s
[root@master231 namespace]#
[root@master231 namespace]# kubectl get ds,deploy,svc,po -o wide -n kube-system
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
daemonset.apps/kube-proxy 3 3 3 3 3 kubernetes.io/os=linux 47h kube-proxy registry.aliyuncs.com/google_containers/kube-proxy:v1.23.17 k8s-app=kube-proxy
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/coredns 2/2 2 2 47h coredns registry.aliyuncs.com/google_containers/coredns:v1.8.6 k8s-app=kube-dns
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/kube-dns ClusterIP 10.200.0.10 <none> 53/UDP,53/TCP,9153/TCP 47h k8s-app=kube-dns
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/coredns-6d8c4cb4d-bknzr 1/1 Running 1 (46h ago) 47h 10.100.160.135 master231 <none> <none>
pod/coredns-6d8c4cb4d-cvp9w 1/1 Running 1 (46h ago) 47h 10.100.160.133 master231 <none> <none>
pod/etcd-master231 1/1 Running 1 (46h ago) 47h 10.0.0.231 master231 <none> <none>
pod/kube-apiserver-master231 1/1 Running 1 (46h ago) 47h 10.0.0.231 master231 <none> <none>
pod/kube-controller-manager-master231 1/1 Running 1 (46h ago) 47h 10.0.0.231 master231 <none> <none>
pod/kube-proxy-6jt4j 1/1 Running 2 (24h ago) 47h 10.0.0.232 worker232 <none> <none>
pod/kube-proxy-q5prf 1/1 Running 1 (46h ago) 47h 10.0.0.231 master231 <none> <none>
pod/kube-proxy-vwdqx 1/1 Running 1 (46h ago) 47h 10.0.0.233 worker233 <none> <none>
pod/kube-scheduler-master231 1/1 Running 1 (46h ago) 47h 10.0.0.231 master231 <none> <none>
[root@master231 namespace]#
[root@master231 namespace]# kubectl get pods --namespace calico-apiserver -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-apiserver-64b779ff45-4nzng 1/1 Running 1 (46h ago) 47h 10.100.140.67 worker233 <none> <none>
calico-apiserver-64b779ff45-957vg 1/1 Running 2 (24h ago) 47h 10.100.203.144 worker232 <none> <none>
[root@master231 namespace]#
2.6 查看所有名称空间的下的资源
[root@master231 namespace]# kubectl get ds,po --all-namespaces
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
calico-system daemonset.apps/calico-node 3 3 3 3 3 kubernetes.io/os=linux 47h
calico-system daemonset.apps/csi-node-driver 3 3 3 3 3 kubernetes.io/os=linux 47h
kube-system daemonset.apps/kube-proxy 3 3 3 3 3 kubernetes.io/os=linux 47h
NAMESPACE NAME READY STATUS RESTARTS AGE
calico-apiserver pod/calico-apiserver-64b779ff45-4nzng 1/1 Running 1 (46h ago) 47h
calico-apiserver pod/calico-apiserver-64b779ff45-957vg 1/1 Running 2 (24h ago) 47h
calico-system pod/calico-kube-controllers-76d5c7cfc-89z7j 1/1 Running 1 (46h ago) 47h
calico-system pod/calico-node-4cvnj 1/1 Running 2 (24h ago) 47h
calico-system pod/calico-node-d4554 1/1 Running 1 (46h ago) 47h
calico-system pod/calico-node-qbxmn 1/1 Running 1 (46h ago) 47h
calico-system pod/calico-typha-595f8c6fcb-9pm4b 1/1 Running 2 (46h ago) 47h
calico-system pod/calico-typha-595f8c6fcb-bhdw6 1/1 Running 3 (24h ago) 47h
calico-system pod/csi-node-driver-7z4hj 2/2 Running 4 (24h ago) 47h
calico-system pod/csi-node-driver-8vj74 2/2 Running 2 (46h ago) 47h
calico-system pod/csi-node-driver-m66z9 2/2 Running 2 (46h ago) 47h
default pod/hello-29069449-v6rww 0/1 Completed 0 2m37s
default pod/hello-29069450-7cgn2 0/1 Completed 0 97s
default pod/hello-29069451-tjdc8 0/1 Completed 0 37s
kube-system pod/coredns-6d8c4cb4d-bknzr 1/1 Running 1 (46h ago) 47h
kube-system pod/coredns-6d8c4cb4d-cvp9w 1/1 Running 1 (46h ago) 47h
kube-system pod/etcd-master231 1/1 Running 1 (46h ago) 47h
kube-system pod/kube-apiserver-master231 1/1 Running 1 (46h ago) 47h
kube-system pod/kube-controller-manager-master231 1/1 Running 1 (46h ago) 47h
kube-system pod/kube-proxy-6jt4j 1/1 Running 2 (24h ago) 47h
kube-system pod/kube-proxy-q5prf 1/1 Running 1 (46h ago) 47h
kube-system pod/kube-proxy-vwdqx 1/1 Running 1 (46h ago) 47h
kube-system pod/kube-scheduler-master231 1/1 Running 1 (46h ago) 47h
tigera-operator pod/tigera-operator-8d497bb9f-bq8gw 1/1 Running 2 (46h ago) 47h
[root@master231 namespace]#
[root@master231 namespace]#
[root@master231 namespace]# kubectl get ds,po -A
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
calico-system daemonset.apps/calico-node 3 3 3 3 3 kubernetes.io/os=linux 47h
calico-system daemonset.apps/csi-node-driver 3 3 3 3 3 kubernetes.io/os=linux 47h
kube-system daemonset.apps/kube-proxy 3 3 3 3 3 kubernetes.io/os=linux 47h
NAMESPACE NAME READY STATUS RESTARTS AGE
calico-apiserver pod/calico-apiserver-64b779ff45-4nzng 1/1 Running 1 (46h ago) 47h
calico-apiserver pod/calico-apiserver-64b779ff45-957vg 1/1 Running 2 (24h ago) 47h
calico-system pod/calico-kube-controllers-76d5c7cfc-89z7j 1/1 Running 1 (46h ago) 47h
calico-system pod/calico-node-4cvnj 1/1 Running 2 (24h ago) 47h
calico-system pod/calico-node-d4554 1/1 Running 1 (46h ago) 47h
calico-system pod/calico-node-qbxmn 1/1 Running 1 (46h ago) 47h
calico-system pod/calico-typha-595f8c6fcb-9pm4b 1/1 Running 2 (46h ago) 47h
calico-system pod/calico-typha-595f8c6fcb-bhdw6 1/1 Running 3 (24h ago) 47h
calico-system pod/csi-node-driver-7z4hj 2/2 Running 4 (24h ago) 47h
calico-system pod/csi-node-driver-8vj74 2/2 Running 2 (46h ago) 47h
calico-system pod/csi-node-driver-m66z9 2/2 Running 2 (46h ago) 47h
default pod/hello-29069449-v6rww 0/1 Completed 0 2m48s
default pod/hello-29069450-7cgn2 0/1 Completed 0 108s
default pod/hello-29069451-tjdc8 0/1 Completed 0 48s
kube-system pod/coredns-6d8c4cb4d-bknzr 1/1 Running 1 (46h ago) 47h
kube-system pod/coredns-6d8c4cb4d-cvp9w 1/1 Running 1 (46h ago) 47h
kube-system pod/etcd-master231 1/1 Running 1 (46h ago) 47h
kube-system pod/kube-apiserver-master231 1/1 Running 1 (46h ago) 47h
kube-system pod/kube-controller-manager-master231 1/1 Running 1 (46h ago) 47h
kube-system pod/kube-proxy-6jt4j 1/1 Running 2 (24h ago) 47h
kube-system pod/kube-proxy-q5prf 1/1 Running 1 (46h ago) 47h
kube-system pod/kube-proxy-vwdqx 1/1 Running 1 (46h ago) 47h
kube-system pod/kube-scheduler-master231 1/1 Running 1 (46h ago) 47h
tigera-operator pod/tigera-operator-8d497bb9f-bq8gw 1/1 Running 2 (46h ago) 47h
[root@master231 namespace]#
3.1 编写资源清单
[root@master231 namespace]# cat 02-ns-svc-deploy.yaml
apiVersion: v1
kind: Namespace
metadata:
name: violet
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deploy-xiuxian
namespace: violet
labels:
apps: xiuxian
spec:
replicas: 5
selector:
matchLabels:
version: v1
template:
metadata:
labels:
version: v1
school: violet
class: linux96
spec:
containers:
- image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
name: xiuxian
---
apiVersion: v1
kind: Service
metadata:
name: svc-xiuxian-nodeport
namespace: violet
spec:
type: NodePort
ports:
- port: 90
protocol: TCP
targetPort: 80
nodePort: 30110
selector:
version: v1
[root@master231 namespace]#
3.2 查看资源
[root@master231 namespace]# kubectl get deploy,rs,svc,po -o wide -n violet
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/deploy-xiuxian 5/5 5 5 4m53s xiuxian registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1 version=v1
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
replicaset.apps/deploy-xiuxian-9ddcfd7db 5 5 5 4m53s xiuxian registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1 pod-template-hash=9ddcfd7db,version=v1
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/svc-xiuxian-nodeport NodePort 10.200.2.134 <none> 90:30110/TCP 2m version=v1
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/deploy-xiuxian-9ddcfd7db-4vvb5 1/1 Running 0 4m53s 10.100.140.117 worker233 <none> <none>
pod/deploy-xiuxian-9ddcfd7db-57r2l 1/1 Running 0 4m53s 10.100.203.158 worker232 <none> <none>
pod/deploy-xiuxian-9ddcfd7db-fvxqf 1/1 Running 0 4m53s 10.100.203.156 worker232 <none> <none>
pod/deploy-xiuxian-9ddcfd7db-q2hv7 1/1 Running 0 4m53s 10.100.203.159 worker232 <none> <none>
pod/deploy-xiuxian-9ddcfd7db-r6bht 1/1 Running 0 4m53s 10.100.140.118 worker233 <none> <none>
[root@master231 namespace]#
[root@master231 namespace]# kubectl -n violet describe svc svc-xiuxian-nodeport
Name: svc-xiuxian-nodeport
Namespace: violet
Labels: <none>
Annotations: <none>
Selector: version=v1
Type: NodePort
IP Family Policy: SingleStack
IP Families: IPv4
IP: 10.200.2.134
IPs: 10.200.2.134
Port: <unset> 90/TCP
TargetPort: 80/TCP
NodePort: <unset> 30110/TCP
Endpoints: 10.100.140.117:80,10.100.140.118:80,10.100.203.156:80 + 2 more...
Session Affinity: None
External Traffic Policy: Cluster
Events: <none>
[root@master231 namespace]#
3.3 删除名称空间后,该名称空间下所有资源都被删除
[root@master231 namespace]# kubectl delete ns violet
namespace "violet" deleted
[root@master231 namespace]#
[root@master231 namespace]# kubectl get deploy,rs,svc,po -o wide -n violet
No resources found in violet namespace.
[root@master231 namespace]#
[root@master231 namespace]# kubectl -n violet describe svc svc-xiuxian-nodeport
Error from server (NotFound): namespaces "violet" not found
[root@master231 namespace]#
svc的底层实现之kube-proxy的代理模式
kube-proxy组件的作用是为k8s集群外部或内部用户提供访问服务(service)的路由。
kube-proxy监听K8S APIServer,一旦service资源发生变化,kube-proxy就会相应地调整负载调度规则,从而保证service始终处于最新状态。
kube-proxy有三种调度模型:
- userspace:
k8s 1.1之前。
- iptables:
k8s 1.2 ~ k8s 1.11之前。
- ipvs:
K8S 1.11之后,如果没有开启ipvs,则自动降级为iptables。
iptables与ipvs对比:
相同点:
都工作在内核空间;
不同点:
iptables:
优点:
灵活,功能强大,可以在数据包不同阶段进行操作。
缺点:
表中规则过多时,响应变慢,即规则遍历匹配和更新,呈线性时延。
换句话说,时间复杂度为: O(N)
ipvs:
优点:
转发效率高,调度算法丰富,支持rr,wrr,lc,wlc,ip hash等。
缺点:
内核支持不全,低版本内核不能使用,需要升级到4.9+内核。
[root@master231 namespace]# kubectl get pods -n kube-system -l k8s-app=kube-proxy -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-proxy-6jt4j 1/1 Running 2 (25h ago) 2d 10.0.0.232 worker232 <none> <none>
kube-proxy-q5prf 1/1 Running 1 (47h ago) 2d 10.0.0.231 master231 <none> <none>
kube-proxy-vwdqx 1/1 Running 1 (47h ago) 2d 10.0.0.233 worker233 <none> <none>
[root@master231 namespace]#
[root@master231 namespace]#
[root@master231 namespace]# kubectl -n kube-system logs kube-proxy-6jt4j
I0408 02:26:26.619004 1 node.go:163] Successfully retrieved node IP: 10.0.0.232
I0408 02:26:26.619071 1 server_others.go:138] "Detected node IP" address="10.0.0.232"
I0408 02:26:26.619117 1 server_others.go:572] "Unknown proxy mode, assuming iptables proxy" proxyMode=""
I0408 02:26:26.663053 1 server_others.go:206] "Using iptables Proxier"
...
[root@master231 namespace]# kubectl get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
sed -e 's#mode: ""#mode: "ipvs"#' | \
kubectl apply -f - -n kube-system
[root@master231 namespace]# kubectl get pods -n kube-system -l k8s-app=kube-proxy -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-proxy-6jt4j 1/1 Running 2 (25h ago) 2d 10.0.0.232 worker232 <none> <none>
kube-proxy-q5prf 1/1 Running 1 (47h ago) 2d 10.0.0.231 master231 <none> <none>
kube-proxy-vwdqx 1/1 Running 1 (47h ago) 2d 10.0.0.233 worker233 <none> <none>
[root@master231 namespace]#
[root@master231 namespace]#
[root@master231 namespace]# kubectl delete pods -n kube-system -l k8s-app=kube-proxy
pod "kube-proxy-6jt4j" deleted
pod "kube-proxy-q5prf" deleted
pod "kube-proxy-vwdqx" deleted
[root@master231 namespace]#
[root@master231 namespace]# kubectl get pods -n kube-system -l k8s-app=kube-proxy -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-proxy-b28sl 1/1 Running 0 5s 10.0.0.231 master231 <none> <none>
kube-proxy-vftsq 1/1 Running 0 6s 10.0.0.233 worker233 <none> <none>
kube-proxy-xd9fw 1/1 Running 0 5s 10.0.0.232 worker232 <none> <none>
[root@master231 namespace]#
[root@master231 namespace]# kubectl -n kube-system logs kube-proxy-b28sl
I0409 03:30:08.300473 1 node.go:163] Successfully retrieved node IP: 10.0.0.231
I0409 03:30:08.300709 1 server_others.go:138] "Detected node IP" address="10.0.0.231"
I0409 03:30:08.323742 1 server_others.go:269] "Using ipvs Proxier"
I0409 03:30:08.323827 1 server_others.go:271] "Creating dualStackProxier for ipvs"
I0409 03:30:08.323874 1 server_others.go:502] "Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, , defaulting to no-op detect-local for IPv6"
I0409 03:30:08.325679 1 proxier.go:435] "IPVS scheduler not specified, use rr by default"
I0409 03:30:08.325940 1 proxier.go:435] "IPVS scheduler not specified, use rr by default"
[root@master231 service]# apt -y install ipvsadm
[root@master231 service]#
[root@master231 service]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.200.0.10 <none> 53/UDP,53/TCP,9153/TCP 2d
[root@master231 service]#
[root@master231 service]# kubectl describe svc -n kube-system
Name: kube-dns
Namespace: kube-system
Labels: k8s-app=kube-dns
kubernetes.io/cluster-service=true
kubernetes.io/name=CoreDNS
Annotations: prometheus.io/port: 9153
prometheus.io/scrape: true
Selector: k8s-app=kube-dns
Type: ClusterIP
IP Family Policy: SingleStack
IP Families: IPv4
IP: 10.200.0.10
IPs: 10.200.0.10
Port: dns 53/UDP
TargetPort: 53/UDP
Endpoints: 10.100.160.133:53,10.100.160.135:53
Port: dns-tcp 53/TCP
TargetPort: 53/TCP
Endpoints: 10.100.160.133:53,10.100.160.135:53
Port: metrics 9153/TCP
TargetPort: 9153/TCP
Endpoints: 10.100.160.133:9153,10.100.160.135:9153
Session Affinity: None
Events: <none>
[root@master231 service]#
[root@master231 service]#
[root@master231 service]# ipvsadm -ln | grep 10.200.0.10 -A 2
TCP 10.200.0.10:53 rr
-> 10.100.160.133:53 Masq 1 0 0
-> 10.100.160.135:53 Masq 1 0 0
TCP 10.200.0.10:9153 rr
-> 10.100.160.133:9153 Masq 1 0 0
-> 10.100.160.135:9153 Masq 1 0 0
--
UDP 10.200.0.10:53 rr
-> 10.100.160.133:53 Masq 1 0 0
-> 10.100.160.135:53 Masq 1 0 0
[root@master231 service]#
部署Metallb第三方组件
如果我们需要在自己的Kubernetes中暴露LoadBalancer的应用,那么Metallb是一个不错的解决方案。
Metallb官网地址:
https://metallb.universe.tf/installation/
kubectl get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
sed -e 's#mode: ""#mode: "ipvs"#' | \
kubectl apply -f - -n kube-system
[root@master231 metallb]# wget https://raw.githubusercontent.com/metallb/metallb/v0.14.9/config/manifests/metallb-native.yaml
http://192.168.16.253/Resources/Kubernetes/Add-ons/metallb/v0.14.9/
[root@master231 metallb]# kubectl apply -f metallb-native.yaml
[root@master231 metallb]# watch kubectl get all -o wide -n metallb-system
Every 2.0s: kubectl get all -o wide -n metallb-system master231: Wed Apr 9 11:56:34 2025
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/controller-686c7db689-5859z 1/1 Running 0 2m54s 10.100.203.160 worker232 <none> <none>
pod/speaker-54qjt 1/1 Running 0 2m54s 10.0.0.232 worker232 <none> <none>
pod/speaker-cmnps 1/1 Running 0 2m54s 10.0.0.231 master231 <none> <none>
pod/speaker-qcfpx 1/1 Running 0 2m54s 10.0.0.233 worker233 <none> <none>
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/metallb-webhook-service ClusterIP 10.200.27.157 <none> 443/TCP 2m55s component=controller
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES
SELECTOR
daemonset.apps/speaker 3 3 3 3 3 kubernetes.io/os=linux 2m55s speaker quay.io/met
allb/speaker:v0.14.9 app=metallb,component=speaker
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/controller 1/1 1 1 2m55s controller quay.io/metallb/controller:v0.14.9 app=metallb,com
ponent=controller
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
replicaset.apps/controller-686c7db689 1 1 1 2m55s controller quay.io/metallb/controller:v0.14.9 app=metal
lb,component=controller,pod-template-hash=686c7db689
[root@master231 metallb]# cat metallb-ip-pool.yaml
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: linux96
namespace: metallb-system
spec:
addresses:
- 10.0.0.150-10.0.0.180
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: violet
namespace: metallb-system
spec:
ipAddressPools:
- linux96
[root@master231 metallb]#
[root@master231 metallb]# kubectl apply -f metallb-ip-pool.yaml
ipaddresspool.metallb.io/linux96 created
l2advertisement.metallb.io/violet created
[root@master231 metallb]#
[root@master231 metallb]# cat deploy-ns-svc.yaml
apiVersion: v1
kind: Namespace
metadata:
name: violet
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deploy-xiuxian
namespace: violet
labels:
apps: xiuxian
spec:
replicas: 5
selector:
matchLabels:
version: v1
template:
metadata:
labels:
version: v1
school: violet
class: linux96
spec:
containers:
- image: registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1
name: xiuxian
---
apiVersion: v1
kind: Service
metadata:
name: svc-xiuxian-lb
namespace: violet
spec:
type: LoadBalancer
ports:
- port: 90
protocol: TCP
targetPort: 80
nodePort: 30120
selector:
version: v1
[root@master231 metallb]#
[root@master231 metallb]#
[root@master231 metallb]# kubectl apply -f deploy-ns-svc.yaml
[root@master231 metallb]#
[root@master231 metallb]# kubectl get svc -n violet
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-xiuxian-lb LoadBalancer 10.200.150.228 10.0.0.150 90:30120/TCP 7s
[root@master231 metallb]#
[root@master231 metallb]# kubectl get deploy,svc,rs,po,svc -n violet
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-xiuxian 5/5 5 5 35s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/svc-xiuxian-lb LoadBalancer 10.200.150.228 10.0.0.150 90:30120/TCP 15s
NAME DESIRED CURRENT READY AGE
replicaset.apps/deploy-xiuxian-9ddcfd7db 5 5 5 35s
NAME READY STATUS RESTARTS AGE
pod/deploy-xiuxian-9ddcfd7db-87v6w 1/1 Running 0 35s
pod/deploy-xiuxian-9ddcfd7db-gczbw 1/1 Running 0 35s
pod/deploy-xiuxian-9ddcfd7db-pjhxs 1/1 Running 0 35s
pod/deploy-xiuxian-9ddcfd7db-vnf9x 1/1 Running 0 35s
pod/deploy-xiuxian-9ddcfd7db-wfbqp 1/1 Running 0 35s
[root@master231 metallb]#
访问测试:
基于NodePort端口访问
http://10.0.0.231:30120/
http://10.0.0.232:30120/
http://10.0.0.233:30120/
基于LoadBalancer访问
10.0.0.150:90
CoreDNS附加组件
coreDNS的作用就是将svc的名称解析为ClusterIP。还可以实现Pod的负载均衡。
早期使用的skyDNS组件,需要单独部署,在k8s 1.9版本中,我们就可以直接使用kubeadm方式安装CoreDNS组件。
从k8s 1.12开始,CoreDNS就成为kubernetes默认的DNS服务器,但是kubeadm支持coreDNS的时间会更早。
推荐阅读:
https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns/coredns
vim /var/lib/kubelet/config.yaml
...
clusterDNS:
- 10.200.0.10
clusterDomain: violet.com
k8s的A记录格式:
<service name>[.<namespace name>.svc.<集群域名>]
注意:集群域名默认为cluster.local,本环境已在kubelet配置(clusterDomain)中改为violet.com,因此完整域名以svc.violet.com结尾。
参考案例:
kube-dns.kube-system.svc.cluster.local
violet-mysql.default.svc.cluster.local
温馨提示:
(1)如果部署时直接写svc的名称,不写名称空间,则默认的名称空间为其引用资源的名称空间;
(2)kubeadm部署时,无需手动配置CoreDNS组件(默认在kube-system已创建),二进制部署时,需要手动安装该组件;
方式一
直接使用alpine去ping您想测试的SVC名称即可,观察能否解析成对应的VIP即可。
[root@master231 metallb]# kubectl get svc -A
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
calico-apiserver calico-api ClusterIP 10.200.87.86 <none> 443/TCP 2d2h
calico-system calico-kube-controllers-metrics ClusterIP None <none> 9094/TCP 2d2h
calico-system calico-typha ClusterIP 10.200.43.178 <none> 5473/TCP 2d2h
default kubernetes ClusterIP 10.200.0.1 <none> 443/TCP 2d3h
default rc-xiuxian ClusterIP 10.200.196.245 <none> 80/TCP 23h
default svc-xiuxian ClusterIP 10.200.25.148 <none> 90/TCP 22h
default svc-xiuxian-nodeport NodePort 10.200.21.38 <none> 90:30090/TCP 21h
kube-system kube-dns ClusterIP 10.200.0.10 <none> 53/UDP,53/TCP,9153/TCP 2d3h
metallb-system metallb-webhook-service ClusterIP 10.200.27.157 <none> 443/TCP 164m
violet svc-xiuxian-lb LoadBalancer 10.200.150.228 10.0.0.150 90:30120/TCP 157m
[root@master231 metallb]#
[root@master231 metallb]# kubectl run test-dns-01 --rm -it --image=registry.cn-hangzhou.aliyuncs.com/lax-k8s/apps:v1 -- sh
If you don't see a command prompt, try pressing enter.
/ # ping rc-xiuxian
PING rc-xiuxian (10.200.196.245): 56 data bytes
64 bytes from 10.200.196.245: seq=0 ttl=64 time=0.156 ms
64 bytes from 10.200.196.245: seq=1 ttl=64 time=0.068 ms
^C
--- rc-xiuxian ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.068/0.112/0.156 ms
/ #
/ # ping metallb-webhook-service.metallb-system
PING metallb-webhook-service.metallb-system (10.200.27.157): 56 data bytes
64 bytes from 10.200.27.157: seq=0 ttl=64 time=0.172 ms
64 bytes from 10.200.27.157: seq=1 ttl=64 time=0.119 ms
64 bytes from 10.200.27.157: seq=2 ttl=64 time=0.084 ms
64 bytes from 10.200.27.157: seq=3 ttl=64 time=0.093 ms
^C
--- metallb-webhook-service.metallb-system ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 0.084/0.117/0.172 ms
/ #
/ # ping metallb-webhook-service.metallb-system.svc.violet.com
PING metallb-webhook-service.metallb-system.svc.violet.com (10.200.27.157): 56 data bytes
64 bytes from 10.200.27.157: seq=0 ttl=64 time=0.034 ms
64 bytes from 10.200.27.157: seq=1 ttl=64 time=0.078 ms
^C
--- metallb-webhook-service.metallb-system.svc.violet.com ping statistics ---
2 packets transmitted, 2 packets received, 0% packet loss
round-trip min/avg/max = 0.034/0.056/0.078 ms
/ #
方式二
apt -y install dnsutils    # 注意:bind-utils是yum/dnf(RHEL系)的包名,Debian/Ubuntu下提供dig命令的包为dnsutils
dig @10.200.0.10 metallb-webhook-service.metallb-system.svc.violet.com +short
[root@master231 metallb]# kubectl get svc -A
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
calico-apiserver calico-api ClusterIP 10.200.87.86 <none> 443/TCP 2d2h
calico-system calico-kube-controllers-metrics ClusterIP None <none> 9094/TCP 2d2h
calico-system calico-typha ClusterIP 10.200.43.178 <none> 5473/TCP 2d2h
default kubernetes ClusterIP 10.200.0.1 <none> 443/TCP 2d3h
default rc-xiuxian ClusterIP 10.200.196.245 <none> 80/TCP 23h
default svc-xiuxian ClusterIP 10.200.25.148 <none> 90/TCP 22h
default svc-xiuxian-nodeport NodePort 10.200.21.38 <none> 90:30090/TCP 21h
kube-system kube-dns ClusterIP 10.200.0.10 <none> 53/UDP,53/TCP,9153/TCP 2d3h
metallb-system metallb-webhook-service ClusterIP 10.200.27.157 <none> 443/TCP 167m
violet svc-xiuxian-lb LoadBalancer 10.200.150.228 10.0.0.150 90:30120/TCP 160m
[root@master231 metallb]#
[root@master231 metallb]# dig @10.200.0.10 metallb-webhook-service.metallb-system.svc.violet.com +short
10.200.27.157
[root@master231 metallb]#
[root@master231 metallb]# dig @10.200.0.10 calico-api.calico-apiserver.svc.violet.com +short
10.200.87.86
[root@master231 metallb]#
[root@master231 metallb]# dig @10.200.0.10 kubernetes.default.svc.violet.com +short
10.200.0.1
[root@master231 metallb]#
方式三:
[root@master231 metallb]# host calico-api.calico-apiserver.svc.violet.com 10.200.0.10
Using domain server:
Name: 10.200.0.10
Address: 10.200.0.10#53
Aliases:
calico-api.calico-apiserver.svc.violet.com has address 10.200.87.86
[root@master231 metallb]#
[root@master231 metallb]# kubectl get deploy,rs,svc,pods -n kube-system -l k8s-app=kube-dns -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
deployment.apps/coredns 2/2 2 2 2d3h coredns registry.aliyuncs.com/google_containers/coredns:v1.8.6 k8s-app=kube-dns
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
replicaset.apps/coredns-6d8c4cb4d 2 2 2 2d3h coredns registry.aliyuncs.com/google_containers/coredns:v1.8.6 k8s-app=kube-dns,pod-template-hash=6d8c4cb4d
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service/kube-dns ClusterIP 10.200.0.10 <none> 53/UDP,53/TCP,9153/TCP 2d3h k8s-app=kube-dns
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod/coredns-6d8c4cb4d-bknzr 1/1 Running 1 (2d2h ago) 2d3h 10.100.160.135 master231 <none> <none>
pod/coredns-6d8c4cb4d-cvp9w 1/1 Running 1 (2d2h ago) 2d3h 10.100.160.133 master231 <none> <none>
[root@master231 metallb]#
[root@master231 metallb]# kubectl -n kube-system describe svc kube-dns
Name: kube-dns
Namespace: kube-system
Labels: k8s-app=kube-dns
kubernetes.io/cluster-service=true
kubernetes.io/name=CoreDNS
Annotations: prometheus.io/port: 9153
prometheus.io/scrape: true
Selector: k8s-app=kube-dns
Type: ClusterIP
IP Family Policy: SingleStack
IP Families: IPv4
IP: 10.200.0.10
IPs: 10.200.0.10
Port: dns 53/UDP
TargetPort: 53/UDP
Endpoints: 10.100.160.133:53,10.100.160.135:53
Port: dns-tcp 53/TCP
TargetPort: 53/TCP
Endpoints: 10.100.160.133:53,10.100.160.135:53
Port: metrics 9153/TCP
TargetPort: 9153/TCP
Endpoints: 10.100.160.133:9153,10.100.160.135:9153
Session Affinity: None
Events: <none>
[root@master231 metallb]#
CoreDns组件优化WordPress实战
[root@worker232 ~]# docker tag mysql:8.0.36-oracle harbor250.violet.com/violet-wp/mysql:8.0.36-oracle
[root@worker232 ~]#
[root@worker232 ~]# docker push harbor250.violet.com/violet-wp/mysql:8.0.36-oracle
The push refers to repository [harbor250.violet.com/violet-wp/mysql]
318dde184d61: Pushed
1c0ff7ed67c4: Pushed
876b8cd855eb: Pushed
84d659420bad: Pushed
9513d2aedd12: Pushed
eaa1e85de732: Pushed
a6909c467615: Pushed
5b76076a2dd4: Pushed
fb5c92e924ab: Pushed
152c1ecea280: Pushed
fc037c17567d: Pushed
8.0.36-oracle: digest: sha256:c57363379dee26561c2e554f82e70704be4c8129bd0d10e29252cc0a34774004 size: 2618
[root@worker232 ~]#
[root@worker233 ~]# docker tag wordpress:6.7.1-php8.1-apache harbor250.violet.com/violet-wp/wordpress:6.7.1-php8.1-apache
[root@worker233 ~]#
[root@worker233 ~]# docker push harbor250.violet.com/violet-wp/wordpress:6.7.1-php8.1-apache
The push refers to repository [harbor250.violet.com/violet-wp/wordpress]
5a91ae3138b2: Pushed
3a7d623958af: Pushed
9dfe5f929ccc: Pushed
10ffebd37647: Pushed
fd6f751879ec: Pushed
1bd5766fdd49: Pushed
7aa076c583ee: Pushed
dd20169e4636: Pushed
5f70bf18a086: Pushed
541b75dced10: Pushed
72d18aad6507: Pushed
6a874987401a: Pushed
65ed9c32ccf8: Pushed
cd29cc24986e: Pushed
683fadaa2d15: Pushed
41a48fee6648: Pushed
e1862c15b46e: Pushed
76c322751b28: Pushed
93531ad2cad2: Pushed
cca374cc7ecc: Pushed
9d3505e94f88: Pushed
7914c8f600f5: Pushed
6.7.1-php8.1-apache: digest: sha256:07c5a73891236eed540e68c8cdc819a24fe617fa81259ee22be3105daefa3ee1 size: 4917
[root@worker233 ~]#
[root@master231 deployments]# cat 03-deploy-wordpress.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: deploy-db
spec:
replicas: 1
selector:
matchLabels:
apps: db
template:
metadata:
labels:
apps: db
spec:
containers:
- image: harbor250.violet.com/violet-wp/mysql:8.0.36-oracle
name: db
env:
- name: MYSQL_DATABASE
value: wordpress
- name: MYSQL_ALLOW_EMPTY_PASSWORD
value: "yes"
- name: MYSQL_USER
value: admin
- name: MYSQL_PASSWORD
value: lax
args:
- --character-set-server=utf8
- --collation-server=utf8_bin
- --default-authentication-plugin=mysql_native_password
---
apiVersion: v1
kind: Service
metadata:
name: svc-db
spec:
ports:
- port: 3306
selector:
apps: db
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deploy-wp
spec:
replicas: 1
selector:
matchLabels:
apps: wp
template:
metadata:
labels:
apps: wp
spec:
containers:
- image: harbor250.violet.com/violet-wp/wordpress:6.7.1-php8.1-apache
name: wp
env:
- name: WORDPRESS_DB_HOST
value: svc-db
- name: WORDPRESS_DB_NAME
value: wordpress
- name: WORDPRESS_DB_USER
value: admin
- name: WORDPRESS_DB_PASSWORD
value: lax
---
apiVersion: v1
kind: Service
metadata:
name: svc-wp-lb
spec:
type: LoadBalancer
ports:
- port: 80
nodePort: 30130
selector:
apps: wp
[root@master231 deployments]#
[root@master231 deployments]# kubectl apply -f 03-deploy-wordpress.yaml
deployment.apps/deploy-db created
service/svc-db created
deployment.apps/deploy-wp created
service/svc-wp-lb created
[root@master231 deployments]#
[root@master231 deployments]# kubectl get -f 03-deploy-wordpress.yaml
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-db 1/1 1 1 4s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/svc-db ClusterIP 10.200.82.128 <none> 3306/TCP 4s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-wp 1/1 1 1 4s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/svc-wp-lb LoadBalancer 10.200.168.21 10.0.0.151 80:30130/TCP 4s
[root@master231 deployments]#
[root@master231 deployments]#
[root@master231 deployments]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deploy-db-c4c857cc8-wpnpj 1/1 Running 0 7s 10.100.203.167 worker232 <none> <none>
deploy-wp-6b75764d48-lq4rp 1/1 Running 0 7s 10.100.140.124 worker233 <none> <none>
[root@master231 deployments]#
[root@master231 deployments]# cat 04-deploy-wordpress-diff-ns.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: deploy-db
namespace: violet
spec:
replicas: 1
selector:
matchLabels:
apps: db
template:
metadata:
labels:
apps: db
spec:
containers:
- image: harbor250.violet.com/violet-wp/mysql:8.0.36-oracle
name: db
env:
- name: MYSQL_DATABASE
value: wordpress
- name: MYSQL_ALLOW_EMPTY_PASSWORD
value: "yes"
- name: MYSQL_USER
value: admin
- name: MYSQL_PASSWORD
value: lax
args:
- --character-set-server=utf8
- --collation-server=utf8_bin
- --default-authentication-plugin=mysql_native_password
---
apiVersion: v1
kind: Service
metadata:
name: svc-db
namespace: violet
spec:
ports:
- port: 3306
selector:
apps: db
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deploy-wp
namespace: default
spec:
replicas: 1
selector:
matchLabels:
apps: wp
template:
metadata:
labels:
apps: wp
spec:
containers:
- image: harbor250.violet.com/violet-wp/wordpress:6.7.1-php8.1-apache
name: wp
env:
- name: WORDPRESS_DB_HOST
# 适合在同一个名称空间的简写形式
# value: svc-db
# 适合在不同名称空间的简写形式
# value: svc-db.violet
# 跨名称空间的完整FQDN格式: <svc名称>.<名称空间>.svc.<集群域名>,本集群域名为violet.com
value: svc-db.violet.svc.violet.com
- name: WORDPRESS_DB_NAME
value: wordpress
- name: WORDPRESS_DB_USER
value: admin
- name: WORDPRESS_DB_PASSWORD
value: lax
---
apiVersion: v1
kind: Service
metadata:
name: svc-wp-lb
namespace: default
spec:
type: LoadBalancer
ports:
- port: 80
nodePort: 30130
selector:
apps: wp
[root@master231 deployments]#
[root@master231 deployments]#
[root@master231 deployments]# kubectl apply -f 04-deploy-wordpress-diff-ns.yaml
deployment.apps/deploy-db created
service/svc-db created
deployment.apps/deploy-wp created
service/svc-wp-lb created
[root@master231 deployments]#
[root@master231 deployments]# kubectl get -f 04-deploy-wordpress-diff-ns.yaml
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-db 1/1 1 1 5s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/svc-db ClusterIP 10.200.86.142 <none> 3306/TCP 5s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-wp 1/1 1 1 5s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/svc-wp-lb LoadBalancer 10.200.229.158 10.0.0.151 80:30130/TCP 5s
[root@master231 deployments]#
[root@master231 deployments]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deploy-wp-67986c5c57-68m9w 1/1 Running 0 10s 10.100.140.127 worker233 <none> <none>
[root@master231 deployments]#
[root@master231 deployments]#
[root@master231 deployments]# kubectl get pods -o wide -n violet -l apps=db
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deploy-db-c4c857cc8-blpsc 1/1 Running 0 15s 10.100.203.169 worker232 <none> <none>
[root@master231 deployments]#
ExternalName类型实现K8S集群外部服务映射
[root@master231 service]# cat 04-svc-ExternalName.yaml
apiVersion: v1
kind: Service
metadata:
name: svc-externalname
spec:
# externalName: www.baidu.com
externalName: www.cnblogs.com
type: ExternalName
[root@master231 service]#
[root@master231 service]# kubectl apply -f 04-svc-ExternalName.yaml
service/svc-externalname created
[root@master231 service]#
[root@master231 service]# kubectl get -f 04-svc-ExternalName.yaml
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-externalname ExternalName <none> www.cnblogs.com <none> 3s
[root@master231 service]#
[root@master231 service]# dig @10.200.0.10 svc-externalname.default.svc.violet.com +short
www.cnblogs.com.
118.31.180.41
101.37.225.65
[root@master231 service]#
[root@master231 service]# ping www.cnblogs.com -c 3
PING www.cnblogs.com (118.31.180.41) 56(84) bytes of data.
64 bytes from 118.31.180.41 (118.31.180.41): icmp_seq=1 ttl=128 time=26.9 ms
64 bytes from 118.31.180.41 (118.31.180.41): icmp_seq=2 ttl=128 time=26.7 ms
64 bytes from 118.31.180.41 (118.31.180.41): icmp_seq=3 ttl=128 time=26.9 ms
--- www.cnblogs.com ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2005ms
rtt min/avg/max/mdev = 26.724/26.839/26.931/0.086 ms
[root@master231 service]#
[root@master231 service]# ping www.cnblogs.com -c 3
PING www.cnblogs.com (101.37.225.65) 56(84) bytes of data.
64 bytes from 101.37.225.65 (101.37.225.65): icmp_seq=1 ttl=128 time=31.1 ms
64 bytes from 101.37.225.65 (101.37.225.65): icmp_seq=2 ttl=128 time=30.8 ms
64 bytes from 101.37.225.65 (101.37.225.65): icmp_seq=3 ttl=128 time=30.6 ms
--- www.cnblogs.com ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2003ms
rtt min/avg/max/mdev = 30.553/30.834/31.120/0.231 ms
[root@master231 service]#
endpoints实现K8S集群外部服务映射
endpoints简称ep,表示端点的意思,用于存储一个或多个IP地址及端口列表。
除了ExternalName外,其他类型的svc只要定义了selector,在创建时都会自动创建一个同名称的ep资源;未定义selector的svc不会自动维护ep,需要手动创建。
当svc删除时会自动删除该ep。
我们如果想要将局域网内部的某个服务器的服务映射到K8S集群内部的某个svc,我们的思路可以先创建一个ep,而后创建一个同名称的svc与之关联即可。
[root@master231 service]# kubectl get svc -A
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
calico-apiserver calico-api ClusterIP 10.200.87.86 <none> 443/TCP 2d4h
calico-system calico-kube-controllers-metrics ClusterIP None <none> 9094/TCP 2d4h
calico-system calico-typha ClusterIP 10.200.43.178 <none> 5473/TCP 2d4h
default baidu ExternalName <none> www.cnblogs.com <none> 10m
default kubernetes ClusterIP 10.200.0.1 <none> 443/TCP 2d4h
default rc-xiuxian ClusterIP 10.200.196.245 <none> 80/TCP 24h
default svc-externalname ExternalName <none> www.cnblogs.com <none> 8m51s
default svc-wp-lb LoadBalancer 10.200.229.158 10.0.0.151 80:30130/TCP 40m
default svc-xiuxian ClusterIP 10.200.25.148 <none> 90/TCP 24h
default svc-xiuxian-nodeport NodePort 10.200.21.38 <none> 90:30090/TCP 22h
kube-system kube-dns ClusterIP 10.200.0.10 <none> 53/UDP,53/TCP,9153/TCP 2d4h
metallb-system metallb-webhook-service ClusterIP 10.200.27.157 <none> 443/TCP 3h59m
violet svc-db ClusterIP 10.200.86.142 <none> 3306/TCP 40m
violet svc-xiuxian-lb LoadBalancer 10.200.150.228 10.0.0.150 90:30120/TCP 3h52m
[root@master231 service]#
[root@master231 service]# kubectl get endpoints -A
NAMESPACE NAME ENDPOINTS AGE
calico-apiserver calico-api 10.100.140.67:5443,10.100.203.144:5443 2d4h
calico-system calico-kube-controllers-metrics 10.100.160.134:9094 2d4h
calico-system calico-typha 10.0.0.232:5473,10.0.0.233:5473 2d4h
default kubernetes 10.0.0.231:6443 2d4h
default rc-xiuxian <none> 24h
default svc-wp-lb 10.100.140.127:80 39m
default svc-xiuxian <none> 24h
default svc-xiuxian-nodeport <none> 22h
kube-system kube-dns 10.100.160.133:53,10.100.160.135:53,10.100.160.133:53 + 3 more... 2d4h
metallb-system metallb-webhook-service 10.100.203.160:9443 3h58m
violet svc-db 10.100.203.169:3306 39m
violet svc-xiuxian-lb 10.100.140.119:80,10.100.140.120:80,10.100.203.161:80 + 2 more... 3h51m
[root@master231 service]#
[root@master231 service]#
[root@master231 service]# kubectl -n violet describe svc svc-xiuxian-lb
Name: svc-xiuxian-lb
Namespace: violet
Labels: <none>
Annotations: metallb.io/ip-allocated-from-pool: linux96
Selector: version=v1
Type: LoadBalancer
IP Family Policy: SingleStack
IP Families: IPv4
IP: 10.200.150.228
IPs: 10.200.150.228
LoadBalancer Ingress: 10.0.0.150
Port: <unset> 90/TCP
TargetPort: 80/TCP
NodePort: <unset> 30120/TCP
Endpoints: 10.100.140.119:80,10.100.140.120:80,10.100.203.161:80 + 2 more...
Session Affinity: None
External Traffic Policy: Cluster
Events: <none>
[root@master231 service]#
[root@master231 service]# kubectl -n violet describe ep svc-xiuxian-lb
Name: svc-xiuxian-lb
Namespace: violet
Labels: <none>
Annotations: endpoints.kubernetes.io/last-change-trigger-time: 2025-04-09T04:00:15Z
Subsets:
Addresses: 10.100.140.119,10.100.140.120,10.100.203.161,10.100.203.162,10.100.203.164
NotReadyAddresses: <none>
Ports:
Name Port Protocol
---- ---- --------
<unset> 80 TCP
Events: <none>
[root@master231 service]#
3.1 在K8S集群外部部署MySQL数据库
[root@harbor250.violet.com ~]# scp -r 10.0.0.231:/etc/docker/certs.d/ /etc/docker/
[root@harbor250.violet.com ~]# docker run -d --name mysql-server --network host -e MYSQL_DATABASE=wordpress -e MYSQL_ALLOW_EMPTY_PASSWORD=yes -e MYSQL_USER=admin -e MYSQL_PASSWORD=lax harbor250.violet.com/violet-wp/mysql:8.0.36-oracle --character-set-server=utf8 --collation-server=utf8_bin --default-authentication-plugin=mysql_native_password
[root@harbor250.violet.com ~]# ss -ntl | grep 3306
LISTEN 0 70 *:33060 *:*
LISTEN 0 151 *:3306 *:*
[root@harbor250.violet.com ~]#
[root@harbor250.violet.com ~]# docker exec -it mysql-server mysql wordpress
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 9
Server version: 8.0.36 MySQL Community Server - GPL
Copyright (c) 2000, 2024, Oracle and/or its affiliates.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> SHOW TABLES;
Empty set (0.00 sec)
mysql>
3.2 编写资源清单
[root@master231 endpoints]# cat 01-ep-svc-deploy.yaml
apiVersion: v1
kind: Endpoints
metadata:
name: ep-db
subsets:
- addresses:
- ip: 10.0.0.250
ports:
- port: 3306
---
apiVersion: v1
kind: Service
metadata:
name: ep-db
spec:
type: ClusterIP
ports:
- port: 3306
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: deploy-wp
namespace: default
spec:
replicas: 1
selector:
matchLabels:
apps: wp
template:
metadata:
labels:
apps: wp
spec:
containers:
- image: harbor250.violet.com/violet-wp/wordpress:6.7.1-php8.1-apache
name: wp
env:
- name: WORDPRESS_DB_HOST
value: ep-db
- name: WORDPRESS_DB_NAME
value: wordpress
- name: WORDPRESS_DB_USER
value: admin
- name: WORDPRESS_DB_PASSWORD
value: lax
---
apiVersion: v1
kind: Service
metadata:
name: svc-wp-lb
namespace: default
spec:
type: LoadBalancer
ports:
- port: 80
selector:
apps: wp
[root@master231 endpoints]#
[root@master231 endpoints]# kubectl apply -f 01-ep-svc-deploy.yaml
endpoints/ep-db created
service/ep-db created
deployment.apps/deploy-wp created
service/svc-wp-lb created
[root@master231 endpoints]#
[root@master231 endpoints]# kubectl get -f 01-ep-svc-deploy.yaml
NAME ENDPOINTS AGE
endpoints/ep-db 10.0.0.250:3306 4s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/ep-db ClusterIP 10.200.2.2 <none> 3306/TCP 4s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/deploy-wp 1/1 1 1 4s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/svc-wp-lb LoadBalancer 10.200.77.193 10.0.0.151 80:32203/TCP 4s
[root@master231 endpoints]#
[root@master231 endpoints]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
deploy-wp-6666c4477c-5zc8q 1/1 Running 0 60s 10.100.140.66 worker233 <none> <none>
[root@master231 endpoints]#
3.3 测试验证
略。浏览器访问WordPress完成安装后,可在集群外部的MySQL中验证数据已写入:
[root@harbor250.violet.com ~]# docker exec -it mysql-server mysql wordpress
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 9
Server version: 8.0.36 MySQL Community Server - GPL
Copyright (c) 2000, 2024, Oracle and/or its affiliates.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql>
mysql> SHOW TABLES;
+-----------------------+
| Tables_in_wordpress |
+-----------------------+
| wp_commentmeta |
| wp_comments |
| wp_links |
| wp_options |
| wp_postmeta |
| wp_posts |
| wp_term_relationships |
| wp_term_taxonomy |
| wp_termmeta |
| wp_terms |
| wp_usermeta |
| wp_users |
+-----------------------+
12 rows in set (0.00 sec)
mysql>
svc的NodePort端口范围修改
[root@master231 service]# cat 02-svc-NodePort-xiuxian.yaml
apiVersion: v1
kind: Service
metadata:
labels:
apps: xiuxian
name: svc-xiuxian-nodeport
spec:
type: NodePort
ports:
- port: 90
protocol: TCP
targetPort: 80
# 声明worker节点的转发端口,默认的有效范围是: 30000-32767
# nodePort: 30080
nodePort: 8080
selector:
version: v1
[root@master231 service]#
[root@master231 service]# kubectl apply -f 02-svc-NodePort-xiuxian.yaml
The Service "svc-xiuxian-nodeport" is invalid: spec.ports[0].nodePort: Invalid value: 8080: provided port is not in the valid range. The range of valid ports is 30000-32767
[root@master231 service]#
推荐阅读:
https://kubernetes.io/zh-cn/docs/reference/command-line-tools-reference/kube-apiserver/
[root@master231 ~]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
...
spec:
containers:
- command:
- kube-apiserver
- --service-node-port-range=3000-50000 # 进行添加这一行即可
...
[root@master231 manifests]# pwd
/etc/kubernetes/manifests
[root@master231 manifests]#
[root@master231 manifests]# mv kube-apiserver.yaml /opt/
[root@master231 manifests]#
[root@master231 manifests]# mv /opt/kube-apiserver.yaml ./
[root@master231 manifests]#
[root@master231 manifests]# kubectl get pods -o wide -n kube-system -l tier=control-plane
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
etcd-master231 1/1 Running 1 (2d5h ago) 2d5h 10.0.0.231 master231 <none> <none>
kube-apiserver-master231 1/1 Running 0 57s 10.0.0.231 master231 <none> <none>
kube-controller-manager-master231 1/1 Running 2 (85s ago) 2d5h 10.0.0.231 master231 <none> <none>
kube-scheduler-master231 1/1 Running 2 (85s ago) 2d5h 10.0.0.231 master231 <none> <none>
[root@master231 manifests]#
[root@master231 service]# cat 02-svc-NodePort-xiuxian.yaml
apiVersion: v1
kind: Service
metadata:
labels:
apps: xiuxian
name: svc-xiuxian-nodeport
spec:
type: NodePort
ports:
- port: 90
protocol: TCP
targetPort: 80
# 声明worker节点的转发端口,默认的有效范围是: 30000-32767(此时已通过--service-node-port-range修改为3000-50000)
# nodePort: 30080
nodePort: 8080
selector:
version: v1
[root@master231 service]#
[root@master231 service]# kubectl apply -f 02-svc-NodePort-xiuxian.yaml
service/svc-xiuxian-nodeport configured
[root@master231 service]#
[root@master231 service]# kubectl get -f 02-svc-NodePort-xiuxian.yaml
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-xiuxian-nodeport NodePort 10.200.21.38 <none> 90:8080/TCP 23h
[root@master231 service]#
kubeadm底层实现之静态Pod
所谓的静态pod就是kubelet自己监视的一个目录,如果这个目录有Pod资源清单,就直接会在当前节点上创建该Pod。也就是说不基于APIServer就可以直接创建Pod。
静态Pod仅对Pod类型的资源有效,其他资源无视。
静态Pod创建的Pod,其名称后缀都会自动加上当前节点的主机名。
vim /var/lib/kubelet/config.yaml
...
staticPodPath: /etc/kubernetes/manifests
温馨提示:
(1)静态Pod是由kubelet启动时通过"staticPodPath"配置参数指定路径
(2)静态Pod创建的Pod名称会自动加上kubelet节点的主机名,比如"-k8s231.violet.com",会忽略"nodeName"字段哟;
(3)静态Pod的创建并不依赖API-Server,而是直接基于kubelet所在节点来启动Pod;
(4)静态Pod的删除只需要将其从staticPodPath指定的路径移除即可;
(5)静态Pod路径仅对Pod资源类型有效,其他类型资源将不被创建哟
(6)咱们的kubeadm部署方式就是基于静态Pod部署的哟;
[root@master231 ~]# grep staticPodPath /var/lib/kubelet/config.yaml
staticPodPath: /etc/kubernetes/manifests
[root@master231 ~]#
[root@master231 ~]# cd /etc/kubernetes/manifests/
[root@master231 manifests]#
[root@master231 manifests]# ll
total 24
drwxr-xr-x 2 root root 4096 Apr 7 11:00 ./
drwxr-xr-x 4 root root 4096 Apr 7 11:00 ../
-rw------- 1 root root 2280 Apr 7 11:00 etcd.yaml
-rw------- 1 root root 4025 Apr 7 11:00 kube-apiserver.yaml
-rw------- 1 root root 3546 Apr 7 11:00 kube-controller-manager.yaml
-rw------- 1 root root 1465 Apr 7 11:00 kube-scheduler.yaml
[root@master231 manifests]#
[root@master231 manifests]# head *
==> etcd.yaml <==
apiVersion: v1
kind: Pod
metadata:
annotations:
kubeadm.kubernetes.io/etcd.advertise-client-urls: https://10.0.0.231:2379
creationTimestamp: null
labels:
component: etcd
tier: control-plane
name: etcd
==> kube-apiserver.yaml <==
apiVersion: v1
kind: Pod
metadata:
annotations:
kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 10.0.0.231:6443
creationTimestamp: null
labels:
component: kube-apiserver
tier: control-plane
name: kube-apiserver
==> kube-controller-manager.yaml <==
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
component: kube-controller-manager
tier: control-plane
name: kube-controller-manager
namespace: kube-system
spec:
==> kube-scheduler.yaml <==
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
component: kube-scheduler
tier: control-plane
name: kube-scheduler
namespace: kube-system
spec:
[root@master231 manifests]#
[root@master231 manifests]# kubectl get pods -o wide -n kube-system -l tier=control-plane
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
etcd-master231 1/1 Running 1 (2d4h ago) 2d5h 10.0.0.231 master231 <none> <none>
kube-apiserver-master231 1/1 Running 1 (2d4h ago) 2d5h 10.0.0.231 master231 <none> <none>
kube-controller-manager-master231 1/1 Running 1 (2d4h ago) 2d5h 10.0.0.231 master231 <none> <none>
kube-scheduler-master231 1/1 Running 1 (2d4h ago) 2d5h 10.0.0.231 master231 <none> <none>
[root@master231 manifests]#
Continue Reading