目标

  • 使用容器完成 metrics-server 部署
  • 演示 Horizontal Pod Autoscaling (pod 自动缩放)

部署 metrics-server 服务

  • Horizontal Pod Autoscaler(HPA)控制器用于实现基于CPU使用率进行自动Pod伸缩的功能。
  • HPA控制器基于Master的kube-controller-manager服务启动参数--horizontal-pod-autoscaler-sync-period定义的时长(默认30秒),周期性监控目标Pod的CPU使用率,并在满足条件时对ReplicationController或Deployment中的Pod副本数进行调整,以符合用户定义的平均Pod CPU使用率。
  • 在新版本的kubernetes中 Pod CPU使用率不再来源于heapster,而是来自于metrics-server
  • 官网原话是 The --horizontal-pod-autoscaler-use-rest-clients is true or unset. Setting this to false switches to Heapster-based autoscaling, which is deprecated.
  • yml 文件来自于github https://github.com/kubernetes-incubator/metrics-server/tree/master/deploy/1.8+
  • /etc/kubernetes/pki/front-proxy-ca.pem 文件来自于部署kubernetes集群
  • 需要对yml文件进行修改才可使用 改动如下
  • 利用Flags --horizontal-pod-autoscaler-sync-period 确定HPA对于Pod组指标的监控频率。默认的周期为30秒。
  • 两次扩展操作之间的默认间隔为3分钟,可以通过Flags来控制 --horizontal-pod-autoscaler-upscale-delay
  • 两次缩小操作之间的默认间隔为5分钟,同样可以通过Flags来控制 --horizontal-pod-autoscaler-downscale-delay
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
      # Container spec excerpt — the modifications applied to the upstream
      # metrics-server manifest (see the full merged file below).
      - name: metrics-server 
        image: freemanliu/metrics-server-amd64:v0.2.1 # gcr.io is blocked here, so a mirrored image is used instead
        imagePullPolicy: Always
        volumeMounts:
        # Mount the cluster PKI directory so the front-proxy CA cert is readable.
        - mountPath: /etc/kubernetes/pki  
          name: ca-ssl
        command:
        - /metrics-server
        # Pull node/pod stats from the kubelet Summary API.
        - --source=kubernetes.summary_api:''
        # CA bundle used to verify the aggregated-API proxy's client certificate;
        # /etc/kubernetes/pki/front-proxy-ca.pem comes from the cluster deployment.
        - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem 
        # Headers the front proxy uses to forward the authenticated identity.
        - --requestheader-username-headers=X-Remote-User 
        - --requestheader-group-headers=X-Remote-Group 
        - --requestheader-extra-headers-prefix=X-Remote-Extra-
      volumes:
      # hostPath volume exposing the node's Kubernetes PKI directory.
      - name: ca-ssl
        hostPath:
         path: /etc/kubernetes/pki
  • 这里将github提供的多个文件合并为一个方便一些
  • kubectl apply -f metrics-server.yml 进行部署
  1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
# metrics-server.yml
# Combined manifest deploying metrics-server into kube-system.
# Merged from the upstream deploy/1.8+ files for convenience.
---
# Let metrics-server delegate authn/authz decisions to the kube-apiserver.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
# Allow metrics-server to read the extension-apiserver-authentication
# ConfigMap (request-header CA configuration) in kube-system.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
# Register metrics.k8s.io/v1beta1 with the API aggregation layer,
# backed by the metrics-server Service below.
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  # NOTE(review): TLS verification of the backing service is skipped;
  # acceptable for this tutorial, but consider a proper serving cert.
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
# Identity the metrics-server pod runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
# The metrics-server Deployment itself.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      containers:
      - name: metrics-server
        # Mirror of the gcr.io image (gcr.io is unreachable from some networks).
        image: freemanliu/google_containers-metrics-server-amd64:v0.2.1
        imagePullPolicy: Always
        volumeMounts:
        # Mount the node's PKI directory so the front-proxy CA cert is readable.
        - mountPath: /etc/kubernetes/pki
          name: ca-ssl
        command:
        - /metrics-server
        # Pull node/pod stats from the kubelet Summary API.
        - --source=kubernetes.summary_api:''
        # CA used to verify the aggregator's client certificate; this file
        # comes from the cluster deployment (front-proxy CA).
        - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
        # Headers carrying the authenticated identity forwarded by the proxy.
        - --requestheader-username-headers=X-Remote-User 
        - --requestheader-group-headers=X-Remote-Group 
        - --requestheader-extra-headers-prefix=X-Remote-Extra-
      volumes:
      - name: ca-ssl
        hostPath:
         path: /etc/kubernetes/pki
---
# ClusterIP Service fronting metrics-server; the APIService above routes
# metrics.k8s.io traffic here.
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
---
# RBAC permissions metrics-server needs to scrape cluster resources.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - "extensions"
  resources:
  - deployments
  verbs:
  - get
  - list
  - watch
---
# Bind the ClusterRole above to the metrics-server ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
 
  • 测试metrics-server 是否部署成功
 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
kubectl get --raw /apis/metrics.k8s.io/v1beta1
{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"nodes","singularName":"","namespaced":false,"kind":"NodeMetrics","verbs":["get","list"]},{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}

[root@k8s-m1 ~]# kubectl top node
NAME      CPU(cores)   CPU%      MEMORY(bytes)   MEMORY%   
k8s-m1    113m         2%        1080Mi          14%       
k8s-m2    133m         3%        1086Mi          14%       
k8s-m3    100m         2%        1029Mi          13%       
k8s-n1    146m         3%        403Mi           5%        
k8s-n2    50m          1%        387Mi           5%        
k8s-n3    38m          0%        284Mi           3% 

部署HPA实验案列

 1
 2
 3
 4
 5
 6
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# 创建 pod 和 service
[root@k8s-m1 ~]#  kubectl run php-apache --image=freemanliu/hpa-example --requests=cpu=200m --expose --port=80
service "php-apache" created
deployment "php-apache" created

# 创建 autoscaler
[root@k8s-m1 ~]# kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
deployment "php-apache" autoscaled

# 查看HPA 
[root@k8s-m1 ~]# kubectl get hpa
NAME         REFERENCE               TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
php-apache   Deployment/php-apache   0%/50%   1         10        1          2h

[root@k8s-m1 ~]# kubectl describe hpa
Name:                                                  php-apache
Namespace:                                             default
Labels:                                                <none>
Annotations:                                           <none>
CreationTimestamp:                                     Thu, 02 Aug 2018 11:37:26 +0800
Reference:                                             Deployment/php-apache
Metrics:                                               ( current / target )
  resource cpu on pods  (as a percentage of request):  45% (90m) / 50%
Min replicas:                                          1
Max replicas:                                          10
Deployment pods:                                       1 current / 1 desired
Conditions:
  Type            Status  Reason              Message
  ----            ------  ------              -------
  AbleToScale     True    ReadyForNewScale    the last scale time was sufficiently old as to warrant a new scale
  ScalingActive   True    ValidMetricFound    the HPA was able to successfully calculate a replica count from cpu resource utilization (percentage of request)
  
  # 增加负载
[root@k8s-m1 ~]#  kubectl run -i --tty load-generator --image=busybox /bin/sh

   while true; do wget -q -O- http://php-apache.default.svc.cluster.local; done
  
  # 过一会儿再查看 hpa 可以看到CPU负载升高了
[root@k8s-m1 ~]# kubectl get hpa
NAME         REFERENCE               TARGETS    MINPODS   MAXPODS   REPLICAS   AGE
php-apache   Deployment/php-apache   387%/50%   1         10        1          2h

# 再等30s左右 再查看HPA 可以发现已经进行自动扩容了
[root@k8s-m1 ~]# kubectl get hpa
NAME         REFERENCE               TARGETS    MINPODS   MAXPODS   REPLICAS   AGE
php-apache   Deployment/php-apache   270%/50%   1         10        4          2h

# 查看deployment 已经扩展到了4个了
[root@k8s-m1 ~]# kubectl get deployment php-apache
NAME         DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
php-apache   4         4         4            4           2h

# 此时停止掉循环 过一会儿会发现 php-apache pod 数量又降下来了 自动缩减了pod数量

# Clean up the php-apache demo resources (deployment, service, HPA).
kubectl delete deployment/php-apache
# Fixed typo: the service is named "php-apache" (was "php-apach", which
# would fail with NotFound and leave the service behind).
kubectl delete service/php-apache
kubectl delete hpa php-apache

参考资料