1 Namespace
A namespace isolates resources within the cluster.
1) Using the command line
# Create a namespace
kubectl create ns my-system
# List namespaces (default is the default namespace)
kubectl get ns
# Delete a namespace (this removes all resources inside that namespace)
kubectl delete ns my-system
2) Using YAML
# my-ns.yml
apiVersion: v1
kind: Namespace
metadata:
  name: my-system
# Apply the resources defined in the config file
kubectl apply -f my-ns.yml
# Delete the resources defined in the config file
kubectl delete -f my-ns.yml
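Namespaced resources take -n to target a specific namespace instead of default; a minimal sketch using the my-system namespace created above (the nginx Pod is only an illustration):
# Create a Pod in a specific namespace
kubectl run mynginx --image=nginx -n my-system
# List Pods in that namespace rather than in default
kubectl get pod -n my-system
# Clean up
kubectl delete pod mynginx -n my-system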
2 Pod
A group of running containers; the Pod is the smallest deployable unit in Kubernetes.
1) Using the command line
# Create an nginx Pod. Older kubectl versions created a Deployment by default, so --generator=run-pod/v1 forces a bare Pod; in kubectl 1.18+ the flag is removed and `kubectl run` always creates a Pod
kubectl run mynginx --image=nginx --generator=run-pod/v1
# List Pods in the default namespace (use -n <namespace> for another namespace)
kubectl get pod
# Kubernetes assigns every Pod an IP; -owide shows extra details such as the Pod IP and the node it runs on
kubectl get pod -owide
# Describe the Pod to see its creation events; useful for troubleshooting which stage of Pod creation went wrong
kubectl describe pod <pod-name>
# View the Pod's logs
kubectl logs <pod-name>
# Access the Pod using its IP plus the port of the container running inside it
# Any machine and any application in the cluster can reach this Pod through its assigned IP
curl 10.244.2.3
2) Using YAML
# my-pod.yml
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: mynginx
  name: mynginx
  # namespace: default
spec:
  containers:
  - image: nginx
    name: mynginx
# my-pod2.yml — a Pod can contain multiple containers
apiVersion: v1
kind: Pod
metadata:
  labels:
    run: myapp
  name: myapp
spec:
  containers:
  - image: nginx
    name: nginx
  - image: tomcat:8.5.68
    name: tomcat
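With multiple containers in one Pod, kubectl exec and kubectl logs take -c to pick a container; a minimal sketch, assuming my-pod2.yml above has been applied:
kubectl apply -f my-pod2.yml
# Open a shell in the nginx container of the myapp Pod
kubectl exec -it myapp -c nginx -- /bin/bash
# View the logs of the tomcat container
kubectl logs myapp -c tomcat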
3 Deployment
Controls Pods, giving them multiple replicas, self-healing, scaling, and related capabilities.
- Multiple replicas
1) Using the command line
# Create a Deployment with a single Pod replica
kubectl create deployment my-dep --image=nginx
2) Using YAML
# my-deploy.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  replicas: 3
  selector:
    matchLabels:
      app: my-dep
  template:
    metadata:
      labels:
        app: my-dep
    spec:
      containers:
      - image: nginx
        name: nginx
- Scaling
# Edit the replicas value to scale the number of Pods up or down
kubectl edit deploy my-dep
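Alternatively, kubectl scale changes the replica count without opening an editor; a minimal sketch against the my-dep Deployment above:
# Scale out to 5 replicas
kubectl scale deployment/my-dep --replicas=5
# Scale back in to 2 replicas
kubectl scale deployment/my-dep --replicas=2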
- Self-healing & failover
Triggered by events such as node downtime, Pod deletion, or container crashes; a quick way to observe it is sketched below.
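A minimal sketch, assuming the my-dep Deployment above is running (the Pod name is a placeholder):
# Delete one of the Deployment's Pods
kubectl delete pod <my-dep-pod-name>
# Watch the Deployment immediately create a replacement to keep the desired replica count
kubectl get pod -w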
- Rolling update
# Roll the Pods over to the new image one by one (--record saves the command in the rollout history)
kubectl set image deployment/my-dep nginx=nginx:1.16.1 --record
# Check the rollout status
kubectl rollout status deployment/my-dep
- Rollback
# View the revision history
kubectl rollout history deployment/my-dep
# View details of a specific revision
kubectl rollout history deployment/my-dep --revision=2
# Roll back to the previous revision
kubectl rollout undo deployment/my-dep
# Roll back to a specific revision
kubectl rollout undo deployment/my-dep --to-revision=2
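A quick way to confirm which image the Deployment runs after an update or rollback (the IMAGES column of the wide output):
# -owide adds the CONTAINERS and IMAGES columns
kubectl get deployment my-dep -owide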
4 Service
An abstraction that exposes a group of Pods as a network service.
- ClusterIP (cluster-internal; only reachable from inside the cluster, no external port is exposed)
1) Using the command line
# Expose the Deployment: map container port 80 to Service port 8000 (--type=ClusterIP is the default)
kubectl expose deploy my-dep --port=8000 --target-port=80
# Select Pods by label
kubectl get pod -l app=my-dep
2) Using YAML
# my-service-clusterip.yml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  selector:
    app: my-dep
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
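Inside the cluster the Service is also reachable by its DNS name, assuming cluster DNS (e.g. CoreDNS) is running; run from within any Pod:
# <service-name>.<namespace>:<port>
curl my-dep.default:8000
# fully qualified form
curl my-dep.default.svc.cluster.local:8000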
Demo: load-balancing test through a Service
Deploy 3 nginx replicas with a Deployment and expose the group of Pods as a network service through a Service.
# Current Pods in the environment
root@master:~# kubectl get pod
NAME READY STATUS RESTARTS AGE
my-dep-6589b6f7dc-fghff 1/1 Running 0 5m16s
my-dep-6589b6f7dc-fpcs4 1/1 Running 0 5m16s
my-dep-6589b6f7dc-gmbjg 1/1 Running 0 5m16s
# Current Services in the environment
root@master:~# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 2d2h
my-dep ClusterIP 10.99.115.245 <none> 8000/TCP 4m54s
# Modify the nginx index page inside each container (set the contents to 1111, 2222 and 3333 respectively)
root@master:~# kubectl exec -it my-dep-6589b6f7dc-fghff /bin/bash
root@my-dep-6589b6f7dc-fghff:/# cd /usr/share/nginx/html/
root@my-dep-6589b6f7dc-fghff:/usr/share/nginx/html# echo 1111 > index.html
root@my-dep-6589b6f7dc-fghff:/usr/share/nginx/html# cat index.html
1111
root@my-dep-6589b6f7dc-fghff:/usr/share/nginx/html# exit
exit
# Observe the load-balancing behavior
root@master:~# curl 10.99.115.245:8000
2222
root@master:~# curl 10.99.115.245:8000
1111
root@master:~# curl 10.99.115.245:8000
1111
root@master:~# curl 10.99.115.245:8000
3333
root@master:~# curl 10.99.115.245:8000
3333
root@master:~# curl 10.99.115.245:8000
3333
root@master:~# curl 10.99.115.245:8000
3333
root@master:~# curl 10.99.115.245:8000
1111
- NodePort (exposes a port on every node; the NodePort range is 30000-32767)
1) Using the command line
kubectl expose deploy my-dep --port=8000 --target-port=80 --type=NodePort
2) Using YAML
# my-svc-nodeport.yml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: my-dep
  name: my-dep
spec:
  ports:
  - port: 8000
    protocol: TCP
    targetPort: 80
  selector:
    app: my-dep
  type: NodePort
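After the NodePort Service is created, Kubernetes assigns a port from the 30000-32767 range; a minimal check (node IP and node port are placeholders):
# PORT(S) shows something like 8000:3xxxx/TCP
kubectl get svc my-dep
# Reach the Service from outside the cluster through any node's IP and the assigned node port
curl <node-ip>:<node-port>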
5 PV&PVC
5.1 NFS environment setup
# NFS server node
# 1. Install the NFS server
apt install nfs-kernel-server
# 2. Create the export directory
mkdir -p /nfs/data
# 3. Export the directory
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
# 4. Apply the configuration
systemctl enable rpcbind --now
systemctl enable nfs-server --now
exportfs -r
# NFS client node
# 1. Install the NFS client
apt-get install nfs-common
# 2. Show the NFS exports of the server at the given IP
showmount -e 10.211.58.18
# 3. Create the mount directory
mkdir -p /nfs/data
# 4. Mount it
mount -t nfs 10.211.58.18:/nfs/data /nfs/data
# 5. Write a test file
echo "hello nfs server" > /nfs/data/test.txt
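A quick sanity check that the export and the mount work, run on the NFS server node (paths as above):
# List the active exports on the server
exportfs -v
# The file written from the client should be visible on the server
cat /nfs/data/test.txt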
5.2 Mounting data the native way
Drawback: after the related Pods are released, the mounted data is not deleted automatically.
# Kubernetes does not create the directory automatically; create it by hand first
mkdir -p /nfs/data/nginx-pv
echo "hello world" > /nfs/data/nginx-pv/index.html
# my-raw-store.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-pv-demo
  name: nginx-pv-demo
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-pv-demo
  template:
    metadata:
      labels:
        app: nginx-pv-demo
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
      - name: html
        nfs:
          server: 10.211.55.18
          path: /nfs/data/nginx-pv
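Apply the manifest before running the verification below:
kubectl apply -f my-raw-store.yml
# Wait until both replicas are ready
kubectl rollout status deployment/nginx-pv-demo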
# Verification
# 1. Get the Pod IPs
root@master:~# kubectl get pod -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-pv-demo-7fb4fc5c7c-6gvsb 1/1 Running 0 2m9s 10.244.1.46 node2 <none> <none>
nginx-pv-demo-7fb4fc5c7c-hstnz 1/1 Running 0 2m9s 10.244.2.41 node1 <none> <none>
# 2. Send requests with curl
root@master:~# curl 10.244.1.46
hello world
root@master:~# curl 10.244.2.41
hello world
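The drawback noted at the top of this section can be seen directly: deleting the Deployment leaves the data behind on the NFS share.
# Delete the Deployment and its Pods
kubectl delete -f my-raw-store.yml
# The mounted data is still on the NFS server
ls /nfs/data/nginx-pv
cat /nfs/data/nginx-pv/index.html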
5.3 Mounting data with PV & PVC
PV: PersistentVolume, a persistent volume that stores the data an application needs to persist in a specified location.
PVC: PersistentVolumeClaim, a claim that declares the persistent-volume specification the application needs.
- Create the PV pool (static provisioning)
# On the NFS server node
mkdir -p /nfs/data/01
mkdir -p /nfs/data/02
mkdir -p /nfs/data/03
- Create the PVs
# my-pv.yml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv01-10m
spec:
  capacity:
    storage: 10M
  accessModes:
  - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/01
    server: 10.211.55.18
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv02-1gi
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/02
    server: 10.211.55.18
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv03-3gi
spec:
  capacity:
    storage: 3Gi
  accessModes:
  - ReadWriteMany
  storageClassName: nfs
  nfs:
    path: /nfs/data/03
    server: 10.211.55.18
- Create the PVC
# my-pvc.yml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nginx-pvc
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 200Mi
  storageClassName: nfs
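Apply the PVs and the PVC. The claim requests 200Mi with storageClassName nfs, so it cannot bind pv01-10m (too small) and ends up bound to pv02-1gi, as the test output below confirms:
kubectl apply -f my-pv.yml
kubectl apply -f my-pvc.yml
# Check which PV the claim was bound to
kubectl get pv,pvc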
- Create Pods bound to the PVC
# my-deploy-pvc.yml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx-deploy-pvc
  name: nginx-deploy-pvc
spec:
  replicas: 2
  selector:
    matchLabels:
      app: nginx-deploy-pvc
  template:
    metadata:
      labels:
        app: nginx-deploy-pvc
    spec:
      containers:
      - image: nginx
        name: nginx
        volumeMounts:
        - name: html
          mountPath: /usr/share/nginx/html
      volumes:
      - name: html
        persistentVolumeClaim:
          claimName: nginx-pvc
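Apply the Deployment so the test below has something to verify:
kubectl apply -f my-deploy-pvc.yml
kubectl rollout status deployment/nginx-deploy-pvc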
- Test
# 1. Check the PV binding status
root@master:/nfs/data/02# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pv01-10m 10M RWX Retain Available nfs 7m31s
pv02-1gi 1Gi RWX Retain Bound default/nginx-pvc nfs 7m31s
pv03-3gi 3Gi RWX Retain Available nfs 7m31s
# 2. Check the PVC binding status
root@master:/nfs/data/02# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
nginx-pvc Bound pv02-1gi 1Gi RWX nfs 5m3s
# 3. From the Deployment config: the Pods bind nginx-pvc, nginx-pvc is bound to pv02-1gi, and pv02-1gi maps to /nfs/data/02, so create an index.html under the 02 directory to verify
echo "hello world" > /nfs/data/02/index.html
# 4. Get the Pod IPs
root@master:/nfs/data/02# kubectl get pod -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deploy-pvc-ffd69b77b-6zl42 1/1 Running 0 7m22s 10.244.2.42 node1 <none> <none>
nginx-deploy-pvc-ffd69b77b-wfbfj 1/1 Running 0 7m22s 10.244.1.47 node2 <none> <none>
# 5. Check the curl results
root@master:/nfs/data/02# curl 10.244.2.42
hello world
root@master:/nfs/data/02# curl 10.244.1.47
hello world
5.4 ConfigMap
Extracts application configuration out of the image so it can be updated in place; files mounted from a ConfigMap are refreshed automatically when the ConfigMap changes (the application still has to reload them). A ConfigMap is commonly used to mount configuration files into Pods.
- Create the redis.conf configuration file
# redis.conf
appendonly yes
- Create a ConfigMap from the configuration file
# Create the ConfigMap; the redis configuration is stored in Kubernetes' etcd
kubectl create cm redis-conf --from-file=redis.conf
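To inspect what was stored (the data key is the file name, redis.conf):
# Summary view
kubectl describe cm redis-conf
# Full YAML, including the data section
kubectl get cm redis-conf -oyaml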
- Create a Pod that uses the ConfigMap
# my-cm.yml
apiVersion: v1
kind: Pod
metadata:
  name: redis
spec:
  containers:
  - name: redis
    image: redis
    command:
    - redis-server
    - "/redis-master/redis.conf"   # path inside the redis container
    ports:
    - containerPort: 6379
    volumeMounts:
    - mountPath: /data
      name: data
    - mountPath: /redis-master
      name: config
  volumes:
  - name: data
    emptyDir: {}
  - name: config
    configMap:
      name: redis-conf
      items:
      - key: redis.conf
        path: redis.conf
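Apply the Pod and confirm the ConfigMap was mounted at the path passed to redis-server:
kubectl apply -f my-cm.yml
# The mounted file should contain: appendonly yes
kubectl exec -it redis -- cat /redis-master/redis.conf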
- Test
# 1. Enter redis inside the container and inspect the current settings
kubectl exec -it redis -- redis-cli
127.0.0.1:6379> config get appendonly
1) "appendonly"
2) "yes"
127.0.0.1:6379> set name yorick
OK
127.0.0.1:6379> get name
"yorick"
127.0.0.1:6379> config get requirepass
1) "requirepass"
2) ""
# 2. Modify the ConfigMap
kubectl edit cm redis-conf
# Add the setting: requirepass 199748
# 3. On the node where redis is scheduled, restart the redis container with docker so it reloads the latest config
docker ps | grep redis
docker restart 73813a809389
# 4. Inspect the settings after the change; the new configuration has taken effect
kubectl exec -it redis -- redis-cli
127.0.0.1:6379> get name
(error) NOAUTH Authentication required.
127.0.0.1:6379> auth 199748
OK
127.0.0.1:6379> get name
"yorick"