[Kubernetes] K8S practice to feel the function of HPA

1. Make sure metrics-server is installed

[root@jdmaster ~]# cd pod/
[root@jdmaster pod]# mkdir metrics
[root@jdmaster pod]# cd metrics/
[root@jdmaster metrics]# kubectl apply -f aliyun-components.yaml
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created

The aliyun-components.yaml file is more than 200 lines long, so its full content is placed at the end of this article. Create the file with vim and paste the content in.

Check the pod and apiservice to verify that the metrics-server is installed successfully:
Master node:

[root@jdmaster metrics]# kubectl get pod -n kube-system -o wide|grep metrics
metrics-server-b9f7b695f-4zc5h 1/1 Running 0 3m27s 10.244.2.14 jdnode-1 <none> <none>

[root@jdmaster metrics]# kubectl get apiservice|grep metrics
v1beta1.metrics.k8s.io kube-system/metrics-server True 2m32s

[root@jdmaster metrics]# kubectl top node
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
jdmaster 123m 6% 855Mi 49%
jdnode-1 38m 1% 360Mi 20%
jdnode-2 34m 1% 401Mi 23%

[root@jdmaster metrics]# kubectl top pod
NAME CPU(cores) MEMORY(bytes)
my-nginx-cf54cdbf7-cpjwh 0m 1Mi
my-nginx-cf54cdbf7-dn28k 0m 4Mi
my-nginx-cf54cdbf7-ndnrg 0m 1Mi

On the worker node:

[root@jdnode-1 ~]# docker images|grep metrics
registry.aliyuncs.com/google_containers/metrics-server v0.6.0 5787924fe1d8 14 months ago 68.8MB

2. Create hpa function

Can be combined with official documents:
https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/
Steps:
1. Edit Dockerfile and index.php

[root@jdmaster metrics]# cd
[root@jdmaster ~]# mkdir hpa
[root@jdmaster ~]# cd hpa/
[root@jdmaster hpa]# vim Dockerfile
[root@jdmaster hpa]# vim index.php
Dockerfile content:
# CPU-burning PHP app on Apache, used as the HPA load target.
FROM php:5-apache
COPY index.php /var/www/html/index.php
# NOTE: the mode must be written without spaces ("a + rx" is a chmod syntax error).
RUN chmod a+rx index.php
index.php content:
<?php
  // Busy loop that burns CPU on every request so the HPA has load to react to.
  // (The pasted original contained HTML residue "<!-- -->" and invalid
  // operators "$x + =" / "$i ++", which are PHP parse errors.)
  $x = 0.0001;
  for ($i = 0; $i <= 1000000; $i++) {
    $x += sqrt($x);
  }
  echo "OK!";
?>

2. Generate image file

[root@jdmaster hpa]# docker build -t sc-hpa:1.0 .
[+] Building 123.2s (8/8) FINISHED
 => [internal] load .dockerignore 0.0s
 => => transferring context: 2B 0.0s
 => [internal] load build definition from Dockerfile 0.0s
 => => transferring dockerfile: 119B 0.0s
 => [internal] load metadata for docker.io/library/php:5-apache 5.2s
 => [internal] load build context 0.0s
 => => transferring context: 136B 0.0s
 => [1/3] FROM docker.io/library/php:5-apache@sha256:0a40fd273961b99d8afe69a61a68c73c04bc0caa9de384d3b2dd9e7986eec86d 117.6s
 => => resolve docker.io/library/php:5-apache@sha256:0a40fd273961b99d8afe69a61a68c73c04bc0caa9de384d3b2dd9e7986eec86d 0.0s
 => => sha256:95f5e2cf93f20c1cd5199f9be5e28097c0317086c4f3492bf33a7d0dc4176b94 3.04kB / 3.04kB 0.0s
 => => sha256:cf165947b5b75ef63a7872634239e795a3063179895699dc8e0726f1039946b3 229B/229B 0.7s
 => => sha256:7bd37682846da479bcfb64459fa36e043d3380a77b401f6de5b862d00d8dcebf 67.44MB / 67.44MB 112.1s
 => => sha256:24c791995c1e498255393db857040793a7a040fa0f8ddd4bbe8fb230648e37d7 12.45kB / 12.45kB 0.0s
 => => sha256:5e6ec7f28fb77f84f64b8c29fcb0a746260563f5858315e3e9fcc4aee2844840 22.50MB / 22.50MB 31.8s
 => => sha256:0a40fd273961b99d8afe69a61a68c73c04bc0caa9de384d3b2dd9e7986eec86d 2.06kB / 2.06kB 0.0s
 => => sha256:99daf8e838e14fb73055ddac03535d506dbf36a5a01c37497ff001c0dbd68f3e 181B/181B 2.1s
 => => sha256:ae320713efba9e138236c82142d67bd9d5f05ef4d4a3de877e7aa27d1456ce3e 17.13MB / 17.13MB 66.3s
 => => sha256:ebcb99c48d8c8dd49d64a2d097966dacca7117b4381a54b6835b0afa487e9814 1.34kB / 1.34kB 33.1s
 => => extracting sha256:5e6ec7f28fb77f84f64b8c29fcb0a746260563f5858315e3e9fcc4aee2844840 1.1s
 => => extracting sha256:cf165947b5b75ef63a7872634239e795a3063179895699dc8e0726f1039946b3 0.0s
 => => sha256:9867e71b4ab60b84952cf76ca4f3446e994d0760b3d6e16658417c496656dca2 430B/430B 33.9s
 => => sha256:936eb418164ae6e2bb965f03cb699d969f0ed568d0f965d6f276c944511dbcfb 487B/487B 34.6s
 => => sha256:bc298e7adaf7d0aa550c78b610cf10f7c71a414aee30948f132c0102579a54b9 12.82MB / 12.82MB 61.9s
 => => sha256:ccd61b587bcd1e85123c101e6bff7ab461b1070ca1758f6f6c53862a2a7b9d7f 498B/498B 63.2s
 => => sha256:b2d4b347f67cc1279b9a7c3643a07d465ab3eac59b96b593be3466220747fd4e 9.73MB / 9.73MB 80.9s
 => => sha256:56e9dde341528a1f5e6bde75bdc8679cb232d3d4c76c319bf4e6bfbaacf21ba7 2.20kB / 2.20kB 67.5s
 => => sha256:9ad99b17eb781e5b1e2d8d71ea4327547e082ceb262544e100bb077da9695e75 906B/906B 68.2s
 => => extracting sha256:7bd37682846da479bcfb64459fa36e043d3380a77b401f6de5b862d00d8dcebf 3.0s
 => => extracting sha256:99daf8e838e14fb73055ddac03535d506dbf36a5a01c37497ff001c0dbd68f3e 0.0s
 => => extracting sha256:ae320713efba9e138236c82142d67bd9d5f05ef4d4a3de877e7aa27d1456ce3e 0.8s
 => => extracting sha256:ebcb99c48d8c8dd49d64a2d097966dacca7117b4381a54b6835b0afa487e9814 0.0s
 => => extracting sha256:9867e71b4ab60b84952cf76ca4f3446e994d0760b3d6e16658417c496656dca2 0.0s
 => => extracting sha256:936eb418164ae6e2bb965f03cb699d969f0ed568d0f965d6f276c944511dbcfb 0.0s
 => => extracting sha256:bc298e7adaf7d0aa550c78b610cf10f7c71a414aee30948f132c0102579a54b9 0.1s
 => => extracting sha256:ccd61b587bcd1e85123c101e6bff7ab461b1070ca1758f6f6c53862a2a7b9d7f 0.0s
 => => extracting sha256:b2d4b347f67cc1279b9a7c3643a07d465ab3eac59b96b593be3466220747fd4e 0.5s
 => => extracting sha256:56e9dde341528a1f5e6bde75bdc8679cb232d3d4c76c319bf4e6bfbaacf21ba7 0.0s
 => => extracting sha256:9ad99b17eb781e5b1e2d8d71ea4327547e082ceb262544e100bb077da9695e75 0.0s
 => [2/3] COPY index.php /var/www/html/index.php 0.1s
 => [3/3] RUN chmod a+rx index.php 0.3s
 => exporting to image 0.0s
 => => exporting layers 0.0s
 => => writing image sha256:d88b43e7a9aa1d7b6f83214777d0a7812c817a846ce5644a57e4775ec339ab4a 0.0s
 => => naming to docker.io/library/sc-hpa:1.0 0.0s

3. Create an alias tag for the image

Tag the sc-hpa image we just built with a second name, hpa-example, so the same image can be referenced by either name.

[root@jdmaster hpa]# docker tag sc-hpa:1.0 hpa-example:latest
[root@jdmaster hpa]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
hpa-example latest d88b43e7a9aa 39 seconds ago 355MB
sc-hpa 1.0 d88b43e7a9aa 39 seconds ago 355MB

4. Write php-apache.yaml

[root@jdmaster hpa]# vim php-apache.yaml
[root@jdmaster hpa]# kubectl apply -f php-apache.yaml
deployment.apps/php-apache created
service/php-apache created
php-apache.yaml content is as follows:
# Deployment for the CPU-burning php-apache demo app; the HPA created in a
# later step targets this Deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: default
  name: php-apache
spec:
  selector:
    matchLabels:
      run: php-apache
  replicas: 1  # starting replica count; the HPA scales this up and down
  template:
    metadata:
      labels:
        run: php-apache
    spec:
      containers:
      - name: php-apache
        image: sc-hpa:1.0  # locally built image; must be loaded on every node
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: 200m
          requests:
            cpu: 100m  # HPA --cpu-percent is computed against this request

---

# ClusterIP Service fronting the php-apache pods (used as the load-test target).
apiVersion: v1
kind: Service
metadata:
  namespace: default
  name: php-apache
  labels:
    run: php-apache
spec:
  ports:
  - port: 80
  selector:
    run: php-apache

5. Export on the master, and then import to the node node

Copy the image tarball to every worker node with scp; otherwise, when the HPA scales the replica count up, pods scheduled onto other nodes will fail to start because the image does not exist there.
Export:

[root@jdmaster hpa]# docker save >sc-hpa.tar sc-hpa:1.0
[root@jdmaster hpa]# ls
Dockerfile index.php php-apache.yaml sc-hpa.tar

Import at each node node:

node-1:

[root@jdnode-1 ~]# ls
anaconda-ks.cfg sc-hpa.tar
[root@jdnode-1 ~]# docker load < sc-hpa.tar

node-2:

[root@jdnode-2 ~]# ls
anaconda-ks.cfg sc-hpa.tar
[root@jdnode-2 ~]# docker load < sc-hpa.tar

6. scp the image tarball to each worker node

Copy the tarball to every worker node so that pods scheduled there during scale-up can find the image locally.

[root@jdmaster hpa]# scp sc-hpa.tar 192.168.1.8:/root
The authenticity of host '192.168.1.8 (192.168.1.8)' can't be established.
ECDSA key fingerprint is SHA256:Znyh8AmnI/EI61D4bduLFBETJD2ZVawH0txsmY1PCD4.
ECDSA key fingerprint is MD5:b9:ac:f0:02:09:27:1f:24:a4:de:e3:df:d8:1f:c1:42.
Are you sure you want to continue connecting (yes/no)?
Warning: Permanently added '192.168.1.8' (ECDSA) to the list of known hosts.
[email protected]'s password:
sc-hpa.tar 100% 347MB 110.2MB/s 00:03

[root@jdmaster hpa]# scp sc-hpa.tar 192.168.1.9:/root
The authenticity of host '192.168.1.9 (192.168.1.9)' can't be established.
ECDSA key fingerprint is SHA256:Znyh8AmnI/EI61D4bduLFBETJD2ZVawH0txsmY1PCD4.
ECDSA key fingerprint is MD5:b9:ac:f0:02:09:27:1f:24:a4:de:e3:df:d8:1f:c1:42.
Are you sure you want to continue connecting (yes/no)?
Warning: Permanently added '192.168.1.9' (ECDSA) to the list of known hosts.
[email protected]'s password:
sc-hpa.tar

Check:

[root@jdmaster hpa]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
php-apache-7c97954b84-fbgdq 1/1 Running 0 10m 10.244.2.15 jdnode-1 <none> <none>

7. Create hpa

[root@jdmaster hpa]# kubectl autoscale deployment php-apache --cpu-percent=20 --min=1 --max=10
horizontalpodautoscaler.autoscaling/php-apache autoscaled
[root@jdmaster hpa]# kubectl get hpa
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
php-apache Deployment/php-apache <unknown>/20% 1 10 0 8s

Wait a little while unknown will become a number, it takes time to collect data.

[root@jdmaster hpa]# kubectl get hpa
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
php-apache Deployment/php-apache 1%/20% 1 10 1 46s

--cpu-percent=20 sets the target average CPU utilization to 20% of the pod's CPU *request* (not the limit). Here the request is 100m, so the target is 100m * 20% = 20m per pod.

8. Test the pod and increase its load

[root@jdmaster hpa]# kubectl run -i --tty load-generator --rm --image=busybox:1.28 --restart=Never -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://10.244.2.15; done"


If you don't see a command prompt, try pressing enter.


OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK! OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK! OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK! OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK! OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK! OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK! OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK! OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK! OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK! OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!OK!^Cpod "load-generator" deleted
pod default/load-generator terminated (Error)

Open another master session (window) to view:

[root@jdmaster hpa]# kubectl get hpa php-apache --watch


Stop the stress test, ctrl + c

Finally it’s over!
The content of aliyun-components.yaml is as follows:

# ServiceAccount that the metrics-server Deployment runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
# Aggregated ClusterRole: lets admin/edit/view roles read pod and node metrics.
# FIX: the pasted original had "-nodes" (no space after "-"), which is not a
# valid YAML sequence item — it parses as the scalar "-nodes".
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
# ClusterRole for metrics-server itself: read kubelet node metrics and
# list/watch pods and nodes.
# FIX: "-nodes/metrics" and "-nodes" were missing the space after "-",
# making them invalid YAML sequence items.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
# Lets metrics-server read the extension-apiserver-authentication ConfigMap
# (client CA data) in kube-system, needed for authenticating API requests.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
# Lets metrics-server delegate authentication/authorization decisions
# (TokenReview / SubjectAccessReview) to the main API server.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
# Grants the system:metrics-server ClusterRole (defined above) to the
# metrics-server ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
# Service in front of the metrics-server pods; the v1beta1.metrics.k8s.io
# APIService (at the end of this file) routes metrics API traffic here.
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https  # resolves to the container port named "https" (4443)
  selector:
    k8s-app: metrics-server
---
# metrics-server Deployment. Fixes over the pasted original:
#   - "--kubelet-insecure-tls" was missing its "- " list marker (invalid YAML)
#   - a duplicate, narrower --kubelet-preferred-address-types flag was removed
#   - "port:https" and "kubernetes.io/os:linux" were missing the space after ":"
#     required for a YAML key/value mapping
#   - the emptyDir value contained an HTML-comment artifact instead of {}
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        # Skip kubelet serving-certificate verification; kubeadm clusters
        # typically have self-signed kubelet certs.
        - --kubelet-insecure-tls
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalDNS,InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --metric-resolution=15s
        image: registry.aliyuncs.com/google_containers/metrics-server:v0.6.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          initialDelaySeconds: 20
          periodSeconds: 10
        resources:
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
# Registers the metrics.k8s.io/v1beta1 API and routes it to the
# metrics-server Service in kube-system (this is what "kubectl top" and the
# HPA controller query).
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true  # skip TLS verification of the backing service
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100