Deploying a stand-alone Kubernetes cluster on CentOS 7

1. Environment initialization

1. Operating system version

# requires Centos version to be 7.5 or above
[root@k8s-master ~]# cat /etc/redhat-release
CentOS Linux release 7.9.2009 (Core)

2. Host name resolution

# Host name resolution: edit the /etc/hosts file on the server and add the following entry
192.168.43.120 master

3. Time synchronization

# Start chronyd service
[root@k8s-master ~]# systemctl start chronyd
# Set the chronyd service to start automatically at boot
[root@k8s-master ~]# systemctl enable chronyd
# The chronyd service starts and waits for a few seconds, then you can use the date command to verify the time
[root@k8s-master ~]# date

4. Disable firewall

[root@k8s-master ~]# systemctl stop firewalld
[root@k8s-master ~]# systemctl disable firewalld

5. Disable selinux

# Edit the /etc/selinux/config file, modify the value of SELINUX to disabled
# Note that you need to restart the linux service after the modification
SELINUX=disabled

6. Disable swap partition

# Edit the partition configuration file /etc/fstab, comment out the swap partition line
# Note that you need to restart the linux service after the modification
[root@k8s-master ~]# vim /etc/fstab
/dev/mapper/centos-root / xfs defaults 0 0
UUID=532ab9ca-839e-4ca2-9ac5-b871d9cc7f71 /boot xfs defaults 0 0
#/dev/mapper/centos-swap swap swap defaults 0 0

7. Modify the kernel parameters of linux

# Modify the kernel parameters of linux, add bridge filtering and address forwarding functions
# Edit the /etc/sysctl.d/kubernetes.conf file and add the following configuration:
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.ipv4.ip_forward = 1

# reload configuration (note: plain "sysctl -p" only reads /etc/sysctl.conf,
# so the file under /etc/sysctl.d/ must be named explicitly, or use "sysctl --system")
[root@k8s-master ~]# sysctl -p /etc/sysctl.d/kubernetes.conf

# Load the bridge filter module
[root@k8s-master ~]# modprobe br_netfilter

# Check whether the bridge filter module is loaded successfully
[root@k8s-master ~]# lsmod | grep br_netfilter

8. Configure ipvs function

# 1 Install ipset and ipvsadm
[root@k8s-master ~]# yum install ipset ipvsadm -y

# 2 Add the modules that need to be loaded into the script file
[root@k8s-master ~]# cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

# 3 Add execution permission to the script file
[root@k8s-master ~]# chmod +x /etc/sysconfig/modules/ipvs.modules

# 4 Execute the script file
[root@k8s-master ~]# /bin/bash /etc/sysconfig/modules/ipvs.modules

# 5 Check whether the corresponding module is loaded successfully
[root@k8s-master ~]# lsmod | grep -e ip_vs -e nf_conntrack_ipv4

9. Restart the machine

[root@k8s-master ~]# reboot

2. Install docker

# 1 switch mirror source
[root@k8s-master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo

# 2 View the docker version supported in the current image source
[root@k8s-master ~]# yum list docker-ce --showduplicates

# 3 Install a specific version of docker-ce
# Must specify --setopt=obsoletes=0, otherwise yum will automatically install a higher version
[root@k8s-master ~]# yum install --setopt=obsoletes=0 docker-ce-18.06.3.ce-3.el7 -y

# 4 Add a configuration file
# The Cgroup Driver used by Docker by default is cgroupfs, and kubernetes recommends using systemd instead of cgroupfs
[root@k8s-master ~]# mkdir /etc/docker
[root@k8s-master ~]# vim /etc/docker/daemon.json
#Add to
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "registry-mirrors": [
    "https://9cpn8tt6.mirror.aliyuncs.com",
    "https://uy35zvn6.mirror.aliyuncs.com",
    "https://8bhew391.mirror.aliyuncs.com",
    "https://almtd3fa.mirror.aliyuncs.com",
    "https://hccwwfjl.mirror.aliyuncs.com",
    "https://registry.docker-cn.com",
    "http://hub-mirror.c.163.com",
    "https://yxzrazem.mirror.aliyuncs.com"
  ]
}

# 5 start docker
[root@k8s-master ~]# systemctl restart docker
[root@k8s-master ~]# systemctl enable docker

# 6 Check docker status and version
[root@k8s-master ~]# docker version

3. Install kubernetes components

1. Switch domestic mirror sources

# Edit /etc/yum.repos.d/kubernetes.repo, add the following configuration
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

2. Install kubeadm, kubelet and kubectl

[root@k8s-master ~]# yum install --setopt=obsoletes=0 kubeadm-1.17.4-0 kubelet-1.17.4-0 kubectl-1.17.4-0 -y

3. Configure the cgroup of kubelet

# Edit /etc/sysconfig/kubelet, add the following configuration
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"

4. Set kubelet to start automatically

[root@k8s-master ~]# systemctl enable kubelet

4. Prepare mirror

# Before installing the kubernetes cluster, you must prepare the mirrors required by the cluster in advance. The required mirrors can be viewed through the following command
[root@k8s-master ~]# kubeadm config images list

# Download mirror
# This mirror is in the warehouse of kubernetes. Due to network reasons, it cannot be connected. An alternative is provided below
# Images required by kubeadm v1.17.4 (see "kubeadm config images list").
# NOTE: tags must contain no spaces — "pause: 3.1" would word-split into two
# bogus array elements ("pause:" and "3.1") and break the pulls.
images=(
    kube-apiserver:v1.17.4
    kube-controller-manager:v1.17.4
    kube-scheduler:v1.17.4
    kube-proxy:v1.17.4
    pause:3.1
    etcd:3.4.3-0
    coredns:1.6.5
)

# Pull each image from the Aliyun mirror (k8s.gcr.io is unreachable from CN),
# retag it under the name kubeadm expects, then remove the mirror-named tag.
for imageName in "${images[@]}"; do
    docker pull "registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}"
    docker tag "registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}" "k8s.gcr.io/${imageName}"
    docker rmi "registry.cn-hangzhou.aliyuncs.com/google_containers/${imageName}"
done

5. Initialization

# Initialize Master
# Initialize the master node.
# NOTE: each continuation backslash needs a space before it — "v1.17.4\" would
# join the lines into "v1.17.4--pod-network-cidr=..." and kubeadm would fail.
# The advertise address must be this host's real IP (matches the /etc/hosts
# entry "192.168.43.120 master" configured earlier).
kubeadm init \
    --kubernetes-version=v1.17.4 \
    --pod-network-cidr=10.244.0.0/16 \
    --service-cidr=10.96.0.0/12 \
    --apiserver-advertise-address=192.168.43.120

# Copy the admin kubeconfig to the current user's home so kubectl works
# without extra flags, and make the user own it.
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"

6. Configure network card

# View cluster status The cluster status at this time is NotReady, because the network plug-in has not been configured yet
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady master 6m43s v1.17.4


# Deploy the CNI network plugin
[root@k8s-master ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
[root@k8s-master ~]# kubectl get pods -n kube-system # View running status
NAME READY STATUS RESTARTS AGE
coredns-6955765f44-dc8ww 1/1 Running 0 38m
coredns-6955765f44-svmvw 1/1 Running 0 38m
etcd-k8s-master 1/1 Running 0 38m
kube-apiserver-k8s-master 1/1 Running 0 38m
kube-controller-manager-k8s-master 1/1 Running 0 38m
kube-proxy-9tnhl 1/1 Running 0 38m
kube-scheduler-k8s-master 1/1 Running 0 38m