您当前的位置: 首页 > 慢生活 > 程序人生 网站首页程序人生
46、存储 PV-PVC(3)
发布时间:2023-01-02 22:36:12编辑:雪饮阅读()
Step1
在harbor上再多创建几个用于nfs共享目录的点
[root@hub ~]# cat /etc/exports
/nfs *(rw,no_root_squash,no_all_squash,sync)
/nfs1 *(rw,no_root_squash,no_all_squash,sync)
/nfs2 *(rw,no_root_squash,no_all_squash,sync)
/nfs3 *(rw,no_root_squash,no_all_squash,sync)
[root@hub ~]# mkdir /nfs{1..3}
[root@hub ~]# chmod 777 /nfs1 /nfs2 /nfs3
[root@hub ~]# chown nfsnobody /nfs1 /nfs2 /nfs3
[root@hub ~]# systemctl restart rpcbind
[root@hub ~]# systemctl restart nfs
然后master节点再次测试挂载及写入数据及卸载挂载
[root@k8s-master01 pv]# mkdir /test
[root@k8s-master01 pv]# mount -t nfs 192.168.66.100:/nfs1 /test
[root@k8s-master01 pv]# date > /test/index.html
[root@k8s-master01 pv]# umount /test
[root@k8s-master01 pv]# rm -rf /test
Step2
接下来我们利用这多个nfs挂载点,同时创建多个pv(不同容量、不同storageClassName。上篇的说法有些不准确:storageClassName虽然可以自定义,但必须与之后创建pvc时引用的storageClassName一致。否则,如果pvc依赖的storageClassName在现有pv中不存在,绑定就无法完成;当这个pvc被pod使用时,对应pod就会长时间处于pending状态,也可能过一段时间后直接报错,而不会无限期等待满足该storageClassName条件的pv出现)
[root@k8s-master01 pv]# cat mpv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfspv2
spec:
capacity:
storage: 5Gi
accessModes:
- ReadOnlyMany
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
nfs:
path: /nfs1
server: 192.168.66.100
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfspv3
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
storageClassName: slow
nfs:
path: /nfs2
server: 192.168.66.100
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfspv4
spec:
capacity:
storage: 1Gi
accessModes:
- ReadOnlyMany
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
nfs:
path: /nfs3
server: 192.168.66.100
然后创建
[root@k8s-master01 pv]# kubectl create -f mpv.yaml
persistentvolume/nfspv2 created
persistentvolume/nfspv3 created
persistentvolume/nfspv4 created
查看pv列表
有3个nfs类型,一个slow类型
[root@k8s-master01 pv]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
nfspv1 10Gi RWO Retain Available nfs 136m
nfspv2 5Gi ROX Retain Available nfs 5m31s
nfspv3 5Gi RWX Retain Available slow 5m31s
nfspv4 1Gi ROX Retain Available nfs 5m31s
Step3
创建服务并使用pvc
[root@k8s-master01 pv]# cat ss.yaml
apiVersion: v1
kind: Service
metadata:
name: nginx
labels:
app: nginx
spec:
ports:
- port: 80
name: web
clusterIP: None
selector:
app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: web
spec:
selector:
matchLabels:
app: nginx
serviceName: "nginx"
replicas: 3
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: wangyanglinux/myapp:v2
ports:
- containerPort: 80
name: web
volumeMounts:
- name: www
mountPath: /usr/share/nginx/html
volumeClaimTemplates:
- metadata:
name: www
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "nfs"
resources:
requests:
storage: 1Gi
可以看到这里就使用到了storageClassName
然后创建
[root@k8s-master01 pv]# kubectl apply -f ss.yaml
service/nginx created
statefulset.apps/web created
可以看到咱们有一个pod是没有运行成功的,一直是pending的状态
[root@k8s-master01 pv]# kubectl get pod
NAME READY STATUS RESTARTS AGE
test-pd 1/1 Running 0 7h42m
web-0 1/1 Running 0 111s
web-1 0/1 Pending 0 108s
查看下原因
[root@k8s-master01 pv]# kubectl describe pod web-1
Name: web-1
Namespace: default
Priority: 0
Node: <none>
Labels: app=nginx
controller-revision-hash=web-65779658bf
statefulset.kubernetes.io/pod-name=web-1
Annotations: <none>
Status: Pending
IP:
Controlled By: StatefulSet/web
Containers:
nginx:
Image: wangyanglinux/myapp:v2
Port: 80/TCP
Host Port: 0/TCP
Environment: <none>
Mounts:
/usr/share/nginx/html from www (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-d8kh2 (ro)
Conditions:
Type Status
PodScheduled False
Volumes:
www:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: www-web-1
ReadOnly: false
default-token-d8kh2:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-d8kh2
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 53s (x3 over 2m8s) default-scheduler pod has unbound immediate PersistentVolumeClaims (repeated 2 times)
原来是pod找不到满足的pvc
根据我们这个使用pvc的这个清单的关于pvc的要求
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "nfs"
resources:
requests:
storage: 1Gi
可以看到我们需要rwo(ReadWriteOnce),且storageClassName为nfs的,且容量为1Gi的
其实对于容量1Gi并不是硬性等于,而是大于等于就行,比如有5Gi和10Gi,也是可以的,只是会默认用满足条件的容量里面最小的那个,那么5Gi和10Gi自然就选择5Gi咯
那么看看我们的pv难道就没有满足的吗?
[root@k8s-master01 pv]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
nfspv1 10Gi RWO Retain Bound default/www-web-0 nfs 156m
nfspv2 5Gi ROX Retain Available nfs 26m
nfspv3 5Gi RWX Retain Available slow 26m
nfspv4 1Gi ROX Retain Available nfs 26m
可以看到是有满足条件的pv的(web-0就已经成功绑定了),但是这次我们使用pvc的清单里还有一项要求,就是pod副本数为3
replicas: 3
所以我们的pod只满足了三分之一咯
Step4
问题的解决
那么我们只要调整pv列表,让满足上面pvc清单要求的pv多至3个不就ok了
由于刚才已经成功使用了一个pv,我们为了简单清净起见,就先删除了这次pvc使用清单所创建的资源
[root@k8s-master01 pv]# kubectl delete -f ss.yaml
service "nginx" deleted
statefulset.apps "web" deleted
然后刚才的那些pv也都清理了,重新创建一批满足3个pod的pvc使用清单的pv(真正使用时候谨慎,反正这里我本地就我一个人实验用而已,实际使用应该是要考虑到有项目在使用某个pv)
[root@k8s-master01 pv]# kubectl delete -f mpv.yaml
persistentvolume "nfspv2" deleted
persistentvolume "nfspv3" deleted
persistentvolume "nfspv4" deleted
然后再次创建一批满足的pv
[root@k8s-master01 pv]# cat mpv2.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfspv2
spec:
capacity:
storage: 5Gi
accessModes:
- ReadOnlyMany
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
nfs:
path: /nfs1
server: 192.168.66.100
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfspv3
spec:
capacity:
storage: 5Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
nfs:
path: /nfs2
server: 192.168.66.100
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfspv4
spec:
capacity:
storage: 50Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: nfs
nfs:
path: /nfs3
server: 192.168.66.100
[root@k8s-master01 pv]# kubectl apply -f mpv2.yaml
persistentvolume/nfspv2 created
persistentvolume/nfspv3 created
persistentvolume/nfspv4 created
然后再次使用上面的使用pvc的资源清单
[root@k8s-master01 pv]# kubectl apply -f ss.yaml
service/nginx created
statefulset.apps/web created
然后稍待片刻就出来了3个pod了
[root@k8s-master01 pv]# kubectl get pod
NAME READY STATUS RESTARTS AGE
test-pd 1/1 Running 0 8h
web-0 1/1 Running 0 4s
web-1 1/1 Running 0 3s
web-2 0/1 Pending 0 1s
[root@k8s-master01 pv]# kubectl get pod
NAME READY STATUS RESTARTS AGE
test-pd 1/1 Running 0 8h
web-0 1/1 Running 0 8s
web-1 1/1 Running 0 7s
web-2 1/1 Running 0 5s
查看到pv也正好是绑定了3个的
[root@k8s-master01 pv]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
nfspv1 10Gi RWO Retain Bound default/www-web-0 nfs 173m
nfspv2 5Gi ROX Retain Available nfs 6m4s
nfspv3 5Gi RWO Retain Bound default/www-web-1 nfs 6m4s
nfspv4 50Gi RWO Retain Bound default/www-web-2 nfs 6m4s
由于每个pod都有pvc请求模板,所以有3个pod就对应有了3个pvc被创建出来哈
[root@k8s-master01 pv]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
www-web-0 Bound nfspv1 10Gi RWO nfs 23m
www-web-1 Bound nfspv3 5Gi RWO nfs 23m
www-web-2 Bound nfspv4 50Gi RWO nfs 2m41s
关键字词:存储,PV,PVC
上一篇:45、存储 PV-PVC(2)
下一篇:47. 存储 PV-PVC(4)