k8s-1.25.2 one-click deployment script (one master and multiple slaves)

kubeadm one-click deployment of a k8s-1.25.2 cluster with one master and multiple worker nodes (containerd runtime)

containerd-1.6.8

k8s-1.25.2

# Generate an SSH key pair non-interactively (skip if one already exists) —
# a plain "ssh-keygen" prompts and stalls a one-click deployment.
[ -f ~/.ssh/id_rsa ] || ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa

# Push the public key to every node for password-less SSH.
for i in master node{1..2}; do echo ">>> $i"; ssh-copy-id "$i"; done

# Write the deployment script; the quoted 'eof' delimiter keeps the body
# literal (no expansion at write time).
cat > k8s-1.25.2.sh << 'eof'
#!/bin/bash
# One-click k8s-1.25.2 deployment (one master, multiple workers, containerd runtime).
# Usage: ./k8s-1.25.2.sh "master node1 node2"

# Start timestamp for the final runtime report.
# (original "date + %s" is an invalid invocation — the space breaks the format arg)
start=$(date +%s)

# Space-separated list of all cluster hosts, taken from the first argument;
# the unquoted ${node[*]} loops below rely on word-splitting this string.
node=$1

# Environment preparation

# 1. Turn off the firewall on every node so the k8s ports are reachable.
echo -e "\e[32;5m[=====? ? ? ? ? ? の Close firewall の ? ? ? ? ?=====]\e[0m"

# ("& amp; & amp;" was an HTML-escaping artifact of "&&")
for i in ${node[*]}; do echo -e "\e[32;5m>>> $i\e[0m";ssh root@$i "systemctl stop firewalld && systemctl disable firewalld"; done

# 2. Disable SELinux now (setenforce 0) and permanently in the config
#    (the config change needs a reboot to take effect).
for i in ${node[*]}; do echo -e "\e[32;5m>>> $i\e[0m";ssh root@$i "setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config"; done

# 3. Close swap (kubelet refuses to start with swap enabled by default)
echo -e "\e[32;5m[=====? ? ? ? ? の Close swap の ? ? ? ? ?=====]\e[0m"

# Disable swap immediately on every node.
for i in ${node[*]}; do echo -e "\e[32;5m>>> $i\e[0m";ssh root@$i "swapoff -a"; done

# Comment out the swap entries in /etc/fstab so it stays off after reboot.
# ("#&" prefixes the matched line with '#'; the original's "# & amp;" was HTML residue)
for i in ${node[*]}; do echo -e "\e[32;5m>>> $i\e[0m";ssh root@$i "sed -i 's/.*swap.*/#&/g' /etc/fstab"; done

# 4. Load the IPVS modules (kube-proxy runs in ipvs mode, see the kubeadm config)
echo -e "\e[32;5m[=====? ? ? ? ? ? の load IPVS module の ? ? ? ? ?=====]\e[0m"

for i in ${node[*]}; do echo -e "\e[32;5m>>> $i\e[0m";ssh root@$i "yum -y install ipset ipvsadm"; done

# modules-load.d(5) files contain bare module names, one per line.
# (the original wrote "modprobe --ip_vs" lines, which is neither a valid
# modules-load.d file nor a runnable modprobe command)
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
scp /etc/modules-load.d/ipvs.conf root@$i:/etc/modules-load.d;
done

# Load the modules right now; systemd-modules-load handles them on boot.
for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack";
done

# 4. Install containerd
echo -e "\e[32;5m[=====? ? ? ? ? のinstall container.io-v1.6.8-1 の? ? ? ? ?=====]\e[0m "

# Fetch the docker-ce repo file and point it at the Huawei Cloud mirror.
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo

# '+' is the sed delimiter; the original had spaces around it, which made
# the pattern match literal " + download.docker.com + " and never substitute.
sed -i 's+download.docker.com+repo.huaweicloud.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
scp /etc/yum.repos.d/docker-ce.repo root@$i:/etc/yum.repos.d/docker-ce.repo;
done

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "yum -y install containerd.io-1.6.8";
done

# Kernel modules containerd/CNI bridging depends on.
cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
scp /etc/modules-load.d/containerd.conf root@$i:/etc/modules-load.d;
done

# Load both modules immediately on every node ("& amp; & amp;" fixed to "&&").
for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "modprobe overlay && modprobe br_netfilter";
done

# Sysctl settings Kubernetes CRI networking requires.
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables=1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables=1
EOF

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
scp /etc/sysctl.d/99-kubernetes-cri.conf root@$i:/etc/sysctl.d;
done

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i sysctl --system;
done

# Generate containerd's default config on every node ("& amp;" fixed to "&&").
for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "mkdir -p /etc/containerd && containerd config default > /etc/containerd/config.toml";
done

# Modify cgroup Driver to systemd (must match the kubelet cgroupDriver below)
echo -e "\e[32;5m[=====? ? ? ? ? の Modify cgroup Driver to systemd の ? ? ? ? ?=====]\e[0m"

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "sed -ri 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml";
done

# Change sandbox_image to pause:3.8 from a reachable mirror
echo -e "\e[32;5m[=====? ? ? ? ? の change sandbox_image to pause:3.8 の ? ? ? ? ?=====]\e[0m"

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "sed -ri 's#k8s.gcr.io\/pause:3.6#registry.aliyuncs.com\/google_containers\/pause:3.8#' /etc/containerd/config.toml";
done

# Point the docker.io registry endpoint at the Alibaba Cloud mirror.
# NOTE(review): the default config.toml may not contain this URL at all,
# in which case this sed is a no-op — verify on the target containerd version.
for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "sed -ri 's#https:\/\/registry-1.docker.io#https:\/\/registry.aliyuncs.com#' /etc/containerd/config.toml";
done

# Reload units and start containerd now and on boot ("& amp;" fixed to "&&").
for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "systemctl daemon-reload && systemctl enable containerd --now";
done

# 5. Install k8s-1.25.2
# (original banner listed "kubelet-v1.25.0" twice with the wrong version)
echo -e "\e[32;5m[=====? ? ? ? ? のinstall kubeadm-1.25.2 kubelet-1.25.2 kubectl-1.25.2 の? ? ? ? ?=====]\e[0m"

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.cloud.tencent.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
scp /etc/yum.repos.d/kubernetes.repo root@$i:/etc/yum.repos.d/kubernetes.repo;
done

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "yum -y install kubeadm-1.25.2-0 kubelet-1.25.2-0 kubectl-1.25.2-0";
done

# kubeadm expects kubelet to be enabled so it restarts after reboot.
for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "systemctl enable kubelet";
done

# set crictl to talk to containerd's socket
echo -e "\e[32;5m[=====? ? ? ? ? の set crictl の ? ? ? ? ?=====]\e[0m"

# Overwrite rather than append — ">>" duplicated the keys on every re-run.
cat << EOF > /etc/crictl.yaml
runtime-endpoint: unix:///var/run/containerd/containerd.sock
image-endpoint: unix:///var/run/containerd/containerd.sock
timeout: 10
debug: false
EOF

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
scp /etc/crictl.yaml root@$i:/etc/crictl.yaml;
done

# Generate the kubeadm init config ("& &" was an HTML artifact of "&&")
mkdir -p ~/kubeadm_init && cd ~/kubeadm_init

# Unquoted EOF: $(hostname -i) is expanded when the file is written,
# embedding the master's IP as advertiseAddress.
cat > kubeadm-init.yaml << EOF
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: $(hostname -i) #master_ip
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: master
  taints:
  - effect: "NoSchedule"
    key: "node-role.kubernetes.io/master"
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.25.2
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
EOF

# Pre-fetch the control-plane images on the master.
echo -e "\e[32;5m[=====? ? ? ? ? ? の Pre-fetch image の ? ? ? ? ?=====]\e[0m"

kubeadm config images pull --config kubeadm-init.yaml

# Workers only need pause and kube-proxy; pull the version matching the
# cluster (v1.25.2 — the original pulled v1.25.0 by mistake).
for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "crictl pull registry.aliyuncs.com/google_containers/pause:3.8 && crictl pull registry.aliyuncs.com/google_containers/kube-proxy:v1.25.2";
done

# Initialize the cluster; keep the log — it contains the join command.
echo -e "\e[32;5m[=====? ? ? ? ? の Initialize cluster の ? ? ? ? ?=====]\e[0m"

kubeadm init --config=kubeadm-init.yaml | tee kubeadm-init.log

# Set up kubectl admin access for the current user.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# Join the cluster
echo -e "\e[32;5m[=====? ? ? ? ? の join cluster の ? ? ? ? ?=====]\e[0m"

# The last two "token" lines of the init log form the multi-line
# "kubeadm join ... \" command; save them as a runnable script.
grep token ~/kubeadm_init/kubeadm-init.log | tail -2 > join.token.sh

for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
scp ~/kubeadm_init/join.token.sh root@$i:/root/join.token.sh;
done

# Run the join on every node ("2> & amp;1" was an HTML artifact of "2>&1").
# NOTE(review): $node includes the master, where the join fails harmlessly;
# output is discarded, so that failure is silent by design.
for i in ${node[*]};do
echo -e "\e[32;5m>>> $i\e[0m";
ssh root@$i "bash /root/join.token.sh 1>/dev/null 2>&1";
done

# install flannel (pod network, Network must match podSubnet 10.244.0.0/16)
echo -e "\e[32;5m[=====? ? ? ? ? の install flannel の ? ? ? ? ?=====]\e[0m"

# Quoted 'EOF': the manifest is written verbatim, no shell expansion.
# HTML-mangled YAML fixed throughout: missing spaces after list dashes
# ("-nodes", "-cp", "-name:"), "kind:Namespace", a broken "--ip-masq" arg,
# and a spurious space inside the "mirrored-flannelcni-flannel" image name.
cat > ~/kube-flannel.yml << 'EOF'
---
kind: Namespace
apiVersion: v1
metadata:
  name: kube-flannel
  labels:
    pod-security.kubernetes.io/enforce: privileged
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-flannel
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-flannel
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni-plugin
       #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
       #image: flannelcni/flannel:v0.19.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
       #image: flannelcni/flannel:v0.19.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.19.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
EOF

kubectl apply -f ~/kube-flannel.yml

# kubectl completion
echo -e "\e[32;5m[=====? ? ? ? ? の kubectl completion の ? ? ? ? ?=====]\e[0m"

yum install bash-completion -y

# Only source the profile script if the package actually provided it.
[ -f /etc/profile.d/bash_completion.sh ] && source /etc/profile.d/bash_completion.sh

# Append the completion hooks once — avoid duplicates when re-running.
grep -q 'crictl completion bash' ~/.bashrc || echo "source <(crictl completion bash)" >> ~/.bashrc
grep -q 'kubectl completion bash' ~/.bashrc || echo "source <(kubectl completion bash)" >> ~/.bashrc

# Final banner: yellow ANSI art spelling "k8s" followed by the install message.
echo -e "\033[0;33m
 ██╗ ██╗ █████╗ ███████╗
 ██║ ██╔╝██╔══██╗██╔════╝
 █████╔╝ ╚█████╔╝███████╗
 ██╔═██╗ ██╔══██╗╚════██║
 ██║ ██╗╚█████╔╝███████║
 ╚═╝ ╚═╝ ╚════╝ ╚══════ has been installed !!!\033[0m"
 
# Script execution time: elapsed seconds since $start was captured.
# ("date + %s" was invalid — the space broke the format argument)
end=$(date +%s)
take=$(( end - start ))
echo -e "\e[32;5m[=========================]\e[0m"
echo -e "\e[32;5m script execution time ---> ${take} seconds \e[0m"
echo -e "\e[32;5m[=========================]\e[0m"
eof
# Make the generated deployment script executable.
chmod +x k8s-1.25.2.sh

# Run it with the full host list as ONE quoted argument; the script
# word-splits it internally via the unquoted ${node[*]} loops.
./k8s-1.25.2.sh "master node1 node2"