k8s builds MySQL master-slave synchronization

env

k8s: v1.28.2
mysql: v8.0.34
  1. Build nfs server
1. Execute the installation commands on 3 machines respectively
yum install -y nfs-utils

2. Create 3 directories on the master node and write them to /etc/exports
mkdir -p /data/nfs/{mysql-master,mysql-slaver-01,mysql-slaver-02}
cat >> /etc/exports << EOF
/data/nfs/mysql-master *(rw,sync,no_root_squash)
/data/nfs/mysql-slaver-01 *(rw,sync,no_root_squash)
/data/nfs/mysql-slaver-02 *(rw,sync,no_root_squash)
EOF
3. Start the nfs server
systemctl enable --now nfs-server //Start the command and execute it on the master node
showmount -e 192.168.10.200 // Check whether the directory is exposed successfully. It can be executed on 3 servers.
  1. Create namespace
- command creation
  kubectl create namespace deploy-test
- Yaml file creation (recommended)
  cat > ./namespace.yaml << EOF
  apiVersion: v1
  kind: Namespace
  metadata:
   name: deploy-test
  spec: {}
  status: {}
  EOF
  1. Create a MySQL password secret
kubectl create secret generic mysql-password \
    --namespace=deploy-test \
    --from-literal=mysql_root_password=root \
    --dry-run=client \
    -o yaml \
    > mysql_root_password_secret.yaml
kubectl apply -f mysql_root_password_secret.yaml // Execute yaml file
kubectl get secret -n deploy-test // View the created secret
  1. Create the pv and pvc of the master node
1. Create yaml file
cat > ./master/pv-pvc.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: deploy-mysql-master-nfs-pv
  namespace: deploy-test
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  nfs:
    server: 192.168.10.200
    path: /data/nfs/mysql-master
  storageClassName: nfs
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: deploy-mysql-master-nfs-pvc
  namespace: deploy-test
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs
  resources:
    requests:
      storage: 1Gi
  volumeName: deploy-mysql-master-nfs-pv
EOF

2. Apply yaml file
kubectl apply -f ./master/pv-pvc.yaml

3. View pv, pvc
kubectl get pv,pvc -n deploy-test
  1. Prepare a configuration file for the master node
cat > ./master/my.cnf << EOF
[mysqld]
skip-host-cache
skip-name-resolve
datadir = /var/lib/mysql
socket = /var/run/mysqld/mysqld.sock
secure-file-priv = /var/lib/mysql-files
pid-file = /var/run/mysqld/mysqld.pid
user=mysql
# NOTE(review): duplicate of secure-file-priv above — the later value (NULL) wins; keep only one
secure-file-priv=NULL
server-id=1
log-bin = master-bin
log_bin_index = master-bin.index
binlog_do_db = xiaohh_user
binlog_ignore_db = information_schema
binlog_ignore_db = mysql
binlog_ignore_db = performance_schema
binlog_ignore_db = sys
binlog-format=ROW

[client]
socket = /var/run/mysqld/mysqld.sock

!includedir /etc/mysql/conf.d/
EOF
  1. Based on the configuration file in step 5, create a ConfigMap to store this configuration file
1. Use kubectl to generate yaml configuration file
kubectl create configmap mysql-master-cm \
    -n deploy-test \
    --from-file=./master/my.cnf \
    --dry-run=client \
    -o yaml \
    > ./master/mysql-master.yaml
    
2. Continue to add this yaml file
cat >> ./master/mysql-master.yaml << EOF
---
apiVersion: v1
kind: Service
metadata:
  name: deploy-mysql-master-svc
  namespace: deploy-test
  labels:
    app: mysql-master
spec:
  ports:
  - port: 3306
    name: mysql
    targetPort: 3306
    nodePort: 30306
  selector:
    app: mysql-master
  type: NodePort
  sessionAffinity: ClientIP
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: deploy-mysql-master
  namespace: deploy-test
spec:
  selector:
    matchLabels:
      app: mysql-master
  serviceName: "deploy-mysql-master-svc"
  replicas: 1
  template:
    metadata:
      labels:
        app: mysql-master
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - args:
        - --character-set-server=utf8mb4
        - --collation-server=utf8mb4_unicode_ci
        - --lower_case_table_names=1
        - --default-time_zone=+8:00
        name: mysql
        image: docker.io/library/mysql:8.0.34
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-data
          mountPath: /var/lib/mysql
        - name: mysql-conf
          mountPath: /etc/my.cnf
          readOnly: true
          subPath: my.cnf
        env:
        - name: MYSQL_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              key: mysql_root_password
              name: mysql-password
      volumes:
      - name: mysql-data
        persistentVolumeClaim:
          claimName: deploy-mysql-master-nfs-pvc
      - name: mysql-conf
        configMap:
          name: mysql-master-cm
          items:
          - key: my.cnf
            mode: 0644
            path: my.cnf
EOF
  1. Apply this profile
kubectl apply -f ./master/mysql-master.yaml
  1. View application status
kubectl get all -o wide -n deploy-test
  1. Enter the container to view
kubectl exec -itn deploy-test pod/deploy-mysql-master-0 -- mysql -u root -p
  1. In the MySQL interactive environment, verify whether logbin is successful
show master status;


11. Create pv, pvc for the first slave node

1. Create a new slave-01 folder
mkdir slave-01

2. Create pv-pvc.yaml
cat > ./slave-01/pv-pvc.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: deploy-mysql-slave-01-nfs-pv
  namespace: deploy-test
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: 192.168.10.200
    path: /data/nfs/mysql-slaver-01
  storageClassName: "nfs"
  
---

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: deploy-mysql-slave-01-nfs-pvc
  namespace: deploy-test
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: "nfs"
  resources:
    requests:
      storage: 1Gi
  volumeName: deploy-mysql-slave-01-nfs-pv
EOF

3. Apply pv-pvc.yaml file
kubectl apply -f slave-01/pv-pvc.yaml
  1. Create ./slave-01/my.cnf file
cat > ./slave-01/my.cnf << EOF
[mysqld]
skip-host-cache
skip-name-resolve
datadir = /var/lib/mysql
socket = /var/run/mysqld/mysqld.sock
secure-file-priv = /var/lib/mysql-files
pid-file = /var/run/mysqld/mysqld.pid
user=mysql
# NOTE(review): duplicate of secure-file-priv above — the later value (NULL) wins; keep only one
secure-file-priv=NULL
server-id=2
log-bin = slave-bin
relay-log = slave-relay-bin
relay-log-index = slave-relay-bin.index

[client]
socket = /var/run/mysqld/mysqld.sock

!includedir /etc/mysql/conf.d/
EOF
  1. Generate the yaml file of configmap based on the file created in the previous step
kubectl create configmap mysql-slave-01-cm \
    -n deploy-test \
    --from-file=./slave-01/my.cnf \
    --dry-run=client \
    -o yaml \
    > ./slave-01/mysql-slave-01.yaml
  1. Supplement the yaml file created in the previous step
cat >> ./slave-01/mysql-slave-01.yaml << EOF
---
apiVersion: v1
kind: Service
metadata:
  name: deploy-mysql-slave-svc
  namespace: deploy-test
  labels:
    app: mysql-slave
spec:
  ports:
  - port: 3306
    name: mysql
    targetPort: 3306
    nodePort: 30308
  selector:
    app: mysql-slave
  type: NodePort
  sessionAffinity: ClientIP
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: deploy-mysql-slave-01
  namespace: deploy-test
spec:
  selector:
    matchLabels:
      app: mysql-slave
  serviceName: "deploy-mysql-slave-svc"
  replicas: 1
  template:
    metadata:
      labels:
        app: mysql-slave
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - args:
        - --character-set-server=utf8mb4
        - --collation-server=utf8mb4_unicode_ci
        - --lower_case_table_names=1
        - --default-time_zone=+8:00
        name: mysql
        image: docker.io/library/mysql:8.0.34
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-data
          mountPath: /var/lib/mysql
        - name: mysql-conf
          mountPath: /etc/my.cnf
          readOnly: true
          subPath: my.cnf
        env:
        - name: MYSQL_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              key: mysql_root_password
              name: mysql-password
      volumes:
      - name: mysql-data
        persistentVolumeClaim:
          claimName: deploy-mysql-slave-01-nfs-pvc
      - name: mysql-conf
        configMap:
          name: mysql-slave-01-cm
          items:
          - key: my.cnf
            mode: 0644
            path: my.cnf
EOF
  1. Apply the yaml file generated in the previous step
1. Apply yaml file
kubectl apply -f slave-01/mysql-slave-01.yaml

2. Check status
kubectl get all -o wide -n deploy-test
  1. Create pv, pvc for the second slave node
1. Create slave-02 folder
mkdir slave-02

2. Create pv-pvc.yaml
cat > ./slave-02/pv-pvc.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: deploy-mysql-slave-02-nfs-pv
  namespace: deploy-test
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: 192.168.10.200
    path: /data/nfs/mysql-slaver-02
  storageClassName: "nfs"
  
---

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: deploy-mysql-slave-02-nfs-pvc
  namespace: deploy-test
spec:
  accessModes:
  - ReadWriteMany
  storageClassName: "nfs"
  resources:
    requests:
      storage: 1Gi
  volumeName: deploy-mysql-slave-02-nfs-pv
EOF

3. Apply pv-pvc.yaml file
kubectl apply -f slave-02/pv-pvc.yaml
  1. Create ./slave-02/my.cnf
cat > ./slave-02/my.cnf << EOF
[mysqld]
skip-host-cache
skip-name-resolve
datadir = /var/lib/mysql
socket = /var/run/mysqld/mysqld.sock
secure-file-priv = /var/lib/mysql-files
pid-file = /var/run/mysqld/mysqld.pid
user=mysql
# NOTE(review): duplicate of secure-file-priv above — the later value (NULL) wins; keep only one
secure-file-priv=NULL
server-id=3
log-bin = slave-bin
relay-log = slave-relay-bin
relay-log-index = slave-relay-bin.index

[client]
socket = /var/run/mysqld/mysqld.sock

!includedir /etc/mysql/conf.d/
EOF
  1. Generate the yaml file of configmap based on the file created in the previous step
kubectl create configmap mysql-slave-02-cm \
    -n deploy-test \
    --from-file=./slave-02/my.cnf \
    --dry-run=client \
    -o yaml \
    > ./slave-02/mysql-slave-02.yaml
  1. Supplement the yaml file created in the previous step
cat >> ./slave-02/mysql-slave-02.yaml << EOF
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: deploy-mysql-slave-02
  namespace: deploy-test
spec:
  selector:
    matchLabels:
      app: mysql-slave
  serviceName: "deploy-mysql-slave-svc"
  replicas: 1
  template:
    metadata:
      labels:
        app: mysql-slave
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - args:
        - --character-set-server=utf8mb4
        - --collation-server=utf8mb4_unicode_ci
        - --lower_case_table_names=1
        - --default-time_zone=+8:00
        name: mysql
        image: docker.io/library/mysql:8.0.34
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-data
          mountPath: /var/lib/mysql
        - name: mysql-conf
          mountPath: /etc/my.cnf
          readOnly: true
          subPath: my.cnf
        env:
        - name: MYSQL_ROOT_PASSWORD
          valueFrom:
            secretKeyRef:
              key: mysql_root_password
              name: mysql-password
      volumes:
      - name: mysql-data
        persistentVolumeClaim:
          claimName: deploy-mysql-slave-02-nfs-pvc
      - name: mysql-conf
        configMap:
          name: mysql-slave-02-cm
          items:
          - key: my.cnf
            mode: 0644
            path: my.cnf
EOF
  1. Apply slave-02/mysql-slave-02.yaml
kubectl apply -f slave-02/mysql-slave-02.yaml
  1. Log in to MySQL on both slave nodes
kubectl exec -itn deploy-test pod/deploy-mysql-slave-01-0 -- mysql -uroot -p
kubectl exec -itn deploy-test pod/deploy-mysql-slave-02-0 -- mysql -uroot -p
  1. Execute the master-slave synchronization command on the two slave nodes respectively.
1. Set master-slave synchronization parameters
// master_host = pod name + service name + namespace + '.svc.cluster.local'
change master to \
    master_host='deploy-mysql-master-0.deploy-mysql-master-svc.deploy-test.svc.cluster.local', \
    master_port=3306, \
    master_user='root', \
    master_password='root', \
    master_log_file='master-bin.000003', \
    master_log_pos=157, \
    master_connect_retry=30, \
    get_master_public_key=1;
   
2. Enable master-slave synchronization
start slave;

3. Check node status
show master status; // master node
show slave status\G // slave node
  1. Verify master-slave synchronization
Create the database xiaohh_user on the master node;
Create data table
Insert data