Kubernetes cluster orchestration – k8s scheduling

nodeName

vim nodename.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx

spec:
  containers:
  - name: nginx
    image: nginx
  nodeName: k8s2

nodeName: k8s2 # nodeName has the highest priority of all scheduling rules; if the named node does not exist, the Pod stays Pending.

kubectl apply -f nodename.yaml

kubectl get pod -o wide

Clean up

kubectl delete -f nodename.yaml
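
To see the Pending behavior mentioned above, point nodeName at a node that does not exist (k8s9 here is a hypothetical name) and apply the manifest again; the Pod never schedules and shows no scheduler events, because nodeName bypasses the scheduler entirely:

sed 's/k8s2/k8s9/' nodename.yaml | kubectl apply -f -
kubectl get pod nginx # STATUS stays Pending
kubectl delete pod nginx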

nodeSelector

vim nodeselector.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    disktype: ssd
kubectl label nodes k8s4 disktype=ssd

kubectl label nodes k8s3 disktype=ssd
kubectl apply -f nodeselector.yaml
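
To confirm which nodes carry the label and see where the Pod landed (these commands only read state):

kubectl get nodes -l disktype=ssd
kubectl get pod nginx -o wide

A label is removed by appending a dash to its key, for example:

kubectl label nodes k8s4 disktype-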

Clean up

kubectl delete -f nodeselector.yaml

nodeAffinity

vim nodeaffinity.yaml
apiVersion: v1
kind: Pod
metadata:
  name: node-affinity
spec:
  containers:
  - name: nginx
    image: nginx
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype
            operator: In
            values:
            - ssd
            - fc

      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        preference:
          matchExpressions:
          - key: kubernetes.io/hostname
            operator: NotIn
            values:
            - k8s3
kubectl apply -f nodeaffinity.yaml
kubectl describe pod node-affinity
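
nodeAffinity supports richer operators than nodeSelector: In, NotIn, Exists, DoesNotExist, Gt and Lt. As a minimal sketch (not one of the files in this walkthrough), a required term that only needs the label key to be present, whatever its value, would look like:

      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype
            operator: Exists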

Clean up

kubectl delete -f nodeaffinity.yaml

podAffinity

vim podaffinity.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - nginx
            topologyKey: "kubernetes.io/hostname"
kubectl apply -f podaffinity.yaml

kubectl get pod -o wide
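
Because the required podAffinity term uses topologyKey "kubernetes.io/hostname" and each Pod carries the app=nginx label it selects on, the first replica lands wherever the scheduler chooses and the other two must follow it onto the same node. A label-filtered view makes this easy to verify:

kubectl get pod -l app=nginx -o wide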

Clean up

kubectl delete -f podaffinity.yaml

podAntiAffinity

vim podantiaffinity.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - nginx
            topologyKey: "kubernetes.io/hostname"
kubectl apply -f podantiaffinity.yaml

kubectl get pod -o wide
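
The required anti-affinity term works the other way around: at most one app=nginx Pod per hostname, so the replicas spread one per node. Scaling past the number of schedulable nodes leaves the extra replicas Pending (pick a replica count above your node total; 4 is just an example):

kubectl scale deployment nginx-deployment --replicas 4

kubectl get pod -o wide # replicas beyond the node count stay Pending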

Clean up

kubectl delete -f podantiaffinity.yaml

podAntiAffinity (preferred)

vim poda.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: node-affinity
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      - effect: NoExecute
        operator: Exists
      containers:
      - name: nginx
        image: nginx
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - nginx
              topologyKey: kubernetes.io/hostname
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: disktype
                operator: In
                values:
                - ssd
                - sata
kubectl apply -f poda.yaml

kubectl get pod -o wide
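
Because this version uses preferredDuringSchedulingIgnoredDuringExecution with a weight instead of a hard rule, the scheduler spreads the replicas when it can but still co-locates them when it must. Scaling past the node count shows the difference from the hard anti-affinity above:

kubectl scale deployment node-affinity --replicas 6

kubectl get pod -o wide # the extra replicas run anyway, doubling up on nodes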

Clean up

kubectl delete -f poda.yaml

Taints

vim taint.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
      - image: nginx
        name: nginx
kubectl apply -f taint.yaml

kubectl get pod -o wide

Set taint

kubectl taint node k8s3 k1=v1:NoSchedule

kubectl describe nodes k8s3 | grep Taint
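
A taint has the form key=value:effect. NoSchedule keeps new Pods off the node but leaves running Pods alone; PreferNoSchedule is a soft version the scheduler tries to honor when it can; NoExecute also evicts Pods already running on the node. For comparison only (not applied in this walkthrough), the soft variant would be:

kubectl taint node k8s3 k1=v1:PreferNoSchedule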

kubectl scale deployment web --replicas 6

kubectl get pod -o wide

kubectl taint node k8s3 k1=v1:NoExecute

kubectl describe nodes k8s3 | grep Taint

kubectl get pod -o wide
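
Because NoExecute evicts running Pods, the replicas that were on k8s3 are terminated and the Deployment recreates them on the remaining untainted nodes.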

Clean up

kubectl delete -f taint.yaml

Set tolerations

vim taint.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web
  name: web
spec:
  replicas: 6
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      tolerations:
      - operator: Exists
        effect: NoSchedule
      containers:
      - image: nginx
        name: nginx
kubectl apply -f taint.yaml

kubectl get pod -o wide
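
This toleration uses operator: Exists with no key, scoped to the NoSchedule effect, so it tolerates any NoSchedule taint but still respects the NoExecute taint on k8s3. To tolerate only the specific taint set earlier, match it exactly (a sketch, assuming the k1=v1 taint from above):

      tolerations:
      - key: k1
        operator: Equal
        value: v1
        effect: NoSchedule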

Clean up

kubectl delete -f taint.yaml

Tolerate all taints

vim taint.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: web
  name: web
spec:
  replicas: 6
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      tolerations:
      - operator: Exists
      containers:
      - image: nginx
        name: nginx
kubectl apply -f taint.yaml

kubectl get pod -o wide
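
A toleration with only operator: Exists (no key, no effect) matches every taint, so these Pods can now land on k8s3 despite both of its taints; system DaemonSets such as kube-proxy commonly rely on the same pattern to run on every node.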

Clean up

kubectl delete -f taint.yaml

Delete taints

kubectl taint node k8s3 k1-
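
The trailing dash removes every taint whose key is k1. A single effect can also be removed by naming it explicitly:

kubectl taint node k8s3 k1:NoExecute-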

cordon, drain, delete

kubectl create deployment demo --image nginx --replicas 3

kubectl get pod -o wide

kubectl cordon k8s3

kubectl get node

kubectl scale deployment demo --replicas 6

kubectl get pod -o wide

kubectl drain k8s3 --ignore-daemonsets

kubectl get pod -o wide

kubectl delete nodes k8s3

kubectl get node

Restarting the kubelet service on the k8s3 node re-registers it with the cluster.

[root@k8s3 ~]# systemctl restart kubelet

[root@k8s2 node]# kubectl get node
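
If a node was only cordoned or drained rather than deleted, no restart is needed; marking it schedulable again is enough:

kubectl uncordon k8s3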