volumes
emptyDir volume
vim emptydir.yaml
apiVersion: v1
kind: Pod
metadata:
  name: vol1
spec:
  containers:
  - image: busyboxplus
    name: vm1
    command: ["sleep", "300"]
    volumeMounts:
    - mountPath: /cache
      name: cache-volume
  - name: vm2
    image: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: cache-volume
  volumes:
  - name: cache-volume
    emptyDir:
      medium: Memory
      sizeLimit: 100Mi
kubectl apply -f emptydir.yaml
kubectl get pod
kubectl exec vol1 -c vm1 -it -- sh
/ # cd /cache/
/cache # curl localhost
/cache # echo www.westos.org > index.html
/cache # curl localhost
/cache # dd if=/dev/zero of=bigfile bs=1M count=200
/cache # du -h bigfile
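Because the emptyDir volume is capped at sizeLimit: 100Mi, writing a 200M file should cause the kubelet to evict the pod shortly afterwards (a rough way to observe this; the exact delay depends on the kubelet's eviction checks):

kubectl get pod vol1         # STATUS should eventually change from Running to Evicted
kubectl describe pod vol1    # the events explain that the emptyDir usage exceeded the limit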
hostPath volume
vim hostpath.yaml
apiVersion: v1
kind: Pod
metadata:
  name: vol2
spec:
  nodeName: k8s4
  containers:
  - image: nginx
    name: test-container
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: test-volume
  volumes:
  - name: test-volume
    hostPath:
      path: /data
      type: DirectoryOrCreate
kubectl apply -f hostpath.yaml
kubectl get pod -o wide
[root@k8s4 data]# echo www.westos.org > index.html
curl 10.244.106.152
nfs volume
Configure the NFS server
[root@k8s1 ~]# yum install -y nfs-utils
[root@k8s1 ~]# vim /etc/exports
/nfsdata *(rw,sync,no_root_squash)
[root@k8s1 ~]# mkdir -m 777 /nfsdata
[root@k8s1 ~]# systemctl enable --now nfs
[root@k8s1 ~]# showmount -e
vim nfs.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nfs
spec:
  containers:
  - image: nginx
    name: test-container
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: test-volume
  volumes:
  - name: test-volume
    nfs:
      server: 192.168.92.11
      path: /nfsdata
The nfs-utils package needs to be installed on all k8s nodes
yum install -y nfs-utils
If it is not installed on the node where the pod is scheduled, the NFS volume cannot be mounted and the pod stays in ContainerCreating.
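The mount failure can be inspected through the pod's events (a hedged example; the exact error text depends on the node OS):

kubectl describe pod nfs    # the FailedMount event shows why the NFS mount failed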
kubectl apply -f nfs.yaml
kubectl get pod -o wide
Create a test page on the NFS server side
[root@k8s1 ~]# cd /nfsdata/
[root@k8s1 nfsdata]# echo www.westos.org > index.html
[root@k8s2 volumes]# curl 10.244.106.153
Persistent Volume
Configure the NFS export directories
[root@k8s1 ~]# cd /nfsdata/
[root@k8s1 nfsdata]# mkdir pv1 pv2 pv3
Create static pv
vim pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv1
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv1
    server: 192.168.92.11
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv2
spec:
  capacity:
    storage: 10Gi
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Delete
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv2
    server: 192.168.92.11
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv3
spec:
  capacity:
    storage: 15Gi
  volumeMode: Filesystem
  accessModes:
  - ReadOnlyMany
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfsdata/pv3
    server: 192.168.92.11
kubectl apply -f pv.yaml
kubectl get pv
Create pvc
vim pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc1
spec:
  storageClassName: nfs
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc2
spec:
  storageClassName: nfs
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc3
spec:
  storageClassName: nfs
  accessModes:
  - ReadOnlyMany
  resources:
    requests:
      storage: 15Gi
kubectl apply -f pvc.yaml
kubectl get pvc
kubectl get pv
Create pod
vim pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: test-pod1
spec:
  containers:
  - image: nginx
    name: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: vol1
  volumes:
  - name: vol1
    persistentVolumeClaim:
      claimName: pvc1
---
apiVersion: v1
kind: Pod
metadata:
  name: test-pod2
spec:
  containers:
  - image: nginx
    name: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: vol1
  volumes:
  - name: vol1
    persistentVolumeClaim:
      claimName: pvc2
---
apiVersion: v1
kind: Pod
metadata:
  name: test-pod3
spec:
  containers:
  - image: nginx
    name: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: vol1
  volumes:
  - name: vol1
    persistentVolumeClaim:
      claimName: pvc3
kubectl apply -f pod.yaml
kubectl get pod -o wide
Create test pages in the NFS export directory
echo pv1 > pv1/index.html
echo pv2 > pv2/index.html
echo pv3 > pv3/index.html
[root@k8s2 pv]# curl 10.244.106.154
[root@k8s2 pv]# curl 10.244.106.155
[root@k8s2 pv]# curl 10.244.106.156
Resources must be reclaimed in order: pod -> pvc -> pv
kubectl delete -f pod.yaml
kubectl delete -f pvc.yaml
After the PVCs are deleted, the PVs are reclaimed and can be reused
kubectl get pv
Recycling a PV (the Recycle reclaim policy) runs a helper pod on a worker node, so its image must be pulled and imported onto the nodes in advance.
Import the image into containerd
[root@k8s3 ~]# ctr -n=k8s.io image import debian-base.tar
[root@k8s4 ~]# ctr -n=k8s.io image import debian-base.tar
Recycle
kubectl delete -f pv.yaml
storageclass
Official website: https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner (dynamic sub-dir volume provisioner on a remote NFS server)
Upload image
Create the ServiceAccount and RBAC authorization
[root@k8s2 storageclass]# vim nfs-client.yaml
apiVersion: v1
kind: Namespace
metadata:
  labels:
    kubernetes.io/metadata.name: nfs-client-provisioner
  name: nfs-client-provisioner
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs-client-provisioner
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs-client-provisioner
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.92.11
            - name: NFS_PATH
              value: /nfsdata
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.92.11
            path: /nfsdata
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-client
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "false"
kubectl apply -f nfs-client.yaml
kubectl -n nfs-client-provisioner get pod
kubectl get sc
Create pvc
vim pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
kubectl apply -f pvc.yaml
kubectl get pvc
Create pod
vim pod.yaml
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: busybox
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim
kubectl apply -f pod.yaml
The pod creates a SUCCESS file in the dynamically provisioned PV.
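On the NFS server, the provisioner creates a subdirectory named <namespace>-<pvc-name>-<pv-name> under /nfsdata, and the SUCCESS file should appear inside it (a sketch; the actual PV name contains a generated UID):

[root@k8s1 ~]# ls /nfsdata/                           # a directory like default-test-claim-pvc-<uid> should exist
[root@k8s1 ~]# ls /nfsdata/default-test-claim-pvc-*   # should list the SUCCESS file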
Recycle
kubectl delete -f pod.yaml
kubectl delete -f pvc.yaml
Set the default storage class so that you do not need to specify storageClassName when creating pvc
kubectl patch storageclass nfs-client -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"} }}'
kubectl get sc
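With a default storage class in place, a PVC that omits storageClassName is still provisioned dynamically by nfs-client. A minimal sketch (the claim name pvc-default is only an example):

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-default
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi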
statefulset controller
vim headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
kubectl apply -f headless.yaml
kubectl get svc
vim statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx-svc"
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      storageClassName: nfs-client
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
kubectl apply -f statefulset.yaml
kubectl get pod
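The volumeClaimTemplates section gives every replica its own PVC, named <template-name>-<pod-name> (www-web-0, www-web-1, www-web-2), each bound to a PV dynamically provisioned by nfs-client; this can be checked with:

kubectl get pvc
kubectl get pv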
Create test pages in the NFS export directory
echo web-0 > default-www-web-0-pvc-8661e761-2aa9-4514-9a37-45be34af3196/index.html
echo web-1 > default-www-web-1-pvc-79b4afc4-c159-409f-8757-35635befa584/index.html
echo web-2 > default-www-web-2-pvc-ee2ae058-a2d9-4f94-b55c-d69ef2f3c0b6/index.html
kubectl run demo --image busyboxplus -it
/ # curl web-0.nginx-svc
/ # curl web-1.nginx-svc
/ # curl web-2.nginx-svc
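The headless service gives each pod a stable DNS record of the form <pod-name>.<service-name> (fully: <pod-name>.<service-name>.<namespace>.svc.cluster.local), which is what the curl commands above rely on. For example, still inside the demo pod:

/ # nslookup web-0.nginx-svc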
statefulset ordered recycling
kubectl scale statefulsets web --replicas=0
kubectl delete -f statefulset.yaml
kubectl delete pvc --all
mysql master-slave deployment
Official website: https://v1-25.docs.kubernetes.io/zh-cn/docs/tasks/run-application/run-replicated-stateful-application/
Upload image
vim configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
    app.kubernetes.io/name: mysql
data:
  primary.cnf: |
    [mysqld]
    log-bin
  replica.cnf: |
    [mysqld]
    super-read-only
kubectl apply -f configmap.yaml
kubectl get cm
vim svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
    app.kubernetes.io/name: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
    app.kubernetes.io/name: mysql
    readonly: "true"
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql
kubectl apply -f svc.yaml
kubectl get svc
vim statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
      app.kubernetes.io/name: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
        app.kubernetes.io/name: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate the MySQL server ID from the Pod ordinal.
          [[ $HOSTNAME =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid using the reserved value server-id=0.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy the appropriate conf.d file from the config map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/primary.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/replica.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: xtrabackup:1.0
        command:
        - bash
        - "-c"
        - |
          set -ex
          # If data already exists, skip cloning.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip cloning on the primary instance (ordinal 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from the previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 512Mi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check if we can perform a query over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: xtrabackup:1.0
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql

          # Determine the binlog position of the cloned data, if any.
          if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we are cloning from an existing replica. (Remove the trailing semicolon!)
            cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
            # Ignore xtrabackup_binlog_info here (it is useless in this case).
            rm -f xtrabackup_slave_info xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We cloned directly from the primary instance. Parse the binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm -f xtrabackup_binlog_info xtrabackup_slave_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi

          # Check if we need to complete cloning by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done

            echo "Initializing replication from clone position"
            mysql -h 127.0.0.1 \
                  -e "$(<change_master_to.sql.in), \
                          MASTER_HOST='mysql-0.mysql', \
                          MASTER_USER='root', \
                          MASTER_PASSWORD='', \
                          MASTER_CONNECT_RETRY=10; \
                        START SLAVE;" || exit 1
            # If the container is restarted, try at most once.
            mv change_master_to.sql.in change_master_to.sql.orig
          fi

          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi
kubectl apply -f statefulset.yaml
kubectl get pod
Connection test
kubectl run demo --image mysql:5.7 -it -- bash
root@demo:/# mysql -h mysql-0.mysql
mysql> show databases;
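To confirm that replication works, write on the primary through mysql-0.mysql and read the data back through the mysql-read service; a sketch, still inside the demo pod (the test database and table names are only examples):

root@demo:/# mysql -h mysql-0.mysql -e "CREATE DATABASE IF NOT EXISTS test; CREATE TABLE IF NOT EXISTS test.messages (message VARCHAR(250)); INSERT INTO test.messages VALUES ('hello');"
root@demo:/# mysql -h mysql-read -e "SELECT * FROM test.messages;"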
Recycle
kubectl delete -f statefulset.yaml
kubectl delete pvc --all