diff --git a/Docker/index.html b/Docker/index.html
new file mode 100644
index 0000000..7a25024
--- /dev/null
+++ b/Docker/index.html
@@ -0,0 +1,127 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<title>About Us</title>
+</head>
+<body>
+
+<div class="about-section">
+  <h1>About Us Page</h1>
+  <p>Some text about who we are and what we do.</p>
+  <p>Resize the browser window to see that this page is responsive by the way.</p>
+</div>
+
+<h2>Our Team</h2>
+<div class="row">
+  <div class="column">
+    <div class="card">
+      <img src="..." alt="Jane" style="width:100%">
+      <div class="container">
+        <h2>Jane Doe</h2>
+        <p class="title">CEO & Founder</p>
+        <p>Some text that describes me lorem ipsum ipsum lorem.</p>
+        <p>jane@example.com</p>
+      </div>
+    </div>
+  </div>
+
+  <div class="column">
+    <div class="card">
+      <img src="..." alt="Mike" style="width:100%">
+      <div class="container">
+        <h2>Mike Ross</h2>
+        <p class="title">Art Director</p>
+        <p>Some text that describes me lorem ipsum ipsum lorem.</p>
+        <p>mike@example.com</p>
+      </div>
+    </div>
+  </div>
+
+  <div class="column">
+    <div class="card">
+      <img src="..." alt="John" style="width:100%">
+      <div class="container">
+        <h2>John Doe</h2>
+        <p class="title">Designer</p>
+        <p>Some text that describes me lorem ipsum ipsum lorem.</p>
+        <p>john@example.com</p>
+      </div>
+    </div>
+  </div>
+</div>
+
+</body>
+</html>
diff --git a/JENKINS/LINUXSLAVE b/JENKINS/LINUXSLAVE
new file mode 100644
index 0000000..aea3ee9
--- /dev/null
+++ b/JENKINS/LINUXSLAVE
@@ -0,0 +1,29 @@
+LINUX SLAVE
+*************************
+Create the agent's root directory:
+
+cd /tmp
+mkdir jenkinsdir
+
+Give permissions to the directory:
+
+sudo chmod -R 777 /tmp/jenkinsdir
+
+Go to master
+*************
+
+Go to the node configuration.
+
+Give the following information under Launch Method:
+
+How to connect to the master: select SSH
+
+Host: private IP
+
+Provide credentials
+
+Also set
+Host Key Verification Strategy to:
+Non verifying Verification Strategy
+
+Save it
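+
+Optional sanity check before adding the node: confirm that the master can reach
+the agent over SSH and that Java is installed on it, since SSH agents need Java
+(user, key path and IP below are placeholders):
+
+ssh -i ~/.ssh/id_rsa ubuntu@PRIVATE_IP java -version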
diff --git a/JENKINS/index.html b/JENKINS/index.html
new file mode 100644
index 0000000..7a25024
--- /dev/null
+++ b/JENKINS/index.html
@@ -0,0 +1,127 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta name="viewport" content="width=device-width, initial-scale=1">
+<title>About Us</title>
+</head>
+<body>
+
+<div class="about-section">
+  <h1>About Us Page</h1>
+  <p>Some text about who we are and what we do.</p>
+  <p>Resize the browser window to see that this page is responsive by the way.</p>
+</div>
+
+<h2>Our Team</h2>
+<div class="row">
+  <div class="column">
+    <div class="card">
+      <img src="..." alt="Jane" style="width:100%">
+      <div class="container">
+        <h2>Jane Doe</h2>
+        <p class="title">CEO & Founder</p>
+        <p>Some text that describes me lorem ipsum ipsum lorem.</p>
+        <p>jane@example.com</p>
+      </div>
+    </div>
+  </div>
+
+  <div class="column">
+    <div class="card">
+      <img src="..." alt="Mike" style="width:100%">
+      <div class="container">
+        <h2>Mike Ross</h2>
+        <p class="title">Art Director</p>
+        <p>Some text that describes me lorem ipsum ipsum lorem.</p>
+        <p>mike@example.com</p>
+      </div>
+    </div>
+  </div>
+
+  <div class="column">
+    <div class="card">
+      <img src="..." alt="John" style="width:100%">
+      <div class="container">
+        <h2>John Doe</h2>
+        <p class="title">Designer</p>
+        <p>Some text that describes me lorem ipsum ipsum lorem.</p>
+        <p>john@example.com</p>
+      </div>
+    </div>
+  </div>
+</div>
+
+</body>
+</html>
diff --git a/Kubernetese/Kubernetes_setup.txt b/Kubernetese/Kubernetes_setup.txt
new file mode 100644
index 0000000..d26e741
--- /dev/null
+++ b/Kubernetese/Kubernetes_setup.txt
@@ -0,0 +1,60 @@
+Install Kubernetes on Ubuntu 18.04 LTS
+
+
+*********************
+Step 1: On all machines (master & all nodes):
+**********************
+### INSTALL DOCKER
+
+sudo apt-get update
+sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
+
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+sudo apt-get update ; clear
+sudo apt-get install -y docker-ce
+sudo service docker start ; clear
+
+### INSTALL KUBEADM, KUBELET, KUBECTL
+
+echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+sudo apt-get update ; clear
+sudo apt-get install -y kubelet kubeadm kubectl
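+
+(Optional) hold these packages so that routine apt upgrades do not break the cluster:
+
+sudo apt-mark hold kubelet kubeadm kubectl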
+
+
+************************
+Step 2: On the master only:
+***********************
+
+sudo kubeadm init --ignore-preflight-errors=all
+
+sudo mkdir -p $HOME/.kube
+sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+## Weave
+**********************
+kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
+
+kubectl get nodes
+kubectl get all --all-namespaces
+
+
+**********************
+Step 3: On the nodes only:
+**********************
+Copy the kubeadm join command from the master & run it on all nodes.
+
+Ex: kubeadm join 10.128.15.231:6443 --token mks3y2.v03tyyru0gy12mbt \
+    --discovery-token-ca-cert-hash sha256:3de23d42c7002be0893339fbe558ee75e14399e11f22e3f0b34351077b7c4b56
+
+
+How to find the kubeadm join command later
+****************************
+kubeadm token create --print-join-command --ttl=0
diff --git a/Kubernetese/Kubernetes_setup_FINAL.txt b/Kubernetese/Kubernetes_setup_FINAL.txt
deleted file mode 100644
index bf5d7cf..0000000
--- a/Kubernetese/Kubernetes_setup_FINAL.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-Install, start and enable docker service
-
-yum install -y -q yum-utils device-mapper-persistent-data lvm2 > /dev/null 2>&1
-yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo > /dev/null 2>&1
-yum install -y -q docker-ce >/dev/null 2>&1
-
-
-systemctl start docker
-systemctl enable docker
-======================================================================
-Update sysctl settings for Kubernetes networking
-
-cat >>/etc/sysctl.d/kubernetes.conf<<EOF
-net.bridge.bridge-nf-call-ip6tables = 1
-net.bridge.bridge-nf-call-iptables = 1
-EOF
-
-cat >>/etc/yum.repos.d/kubernetes.repo<<EOF
diff --git a/Kubernetese/hpa/README.md b/Kubernetese/hpa/README.md
new file mode 100644
--- /dev/null
+++ b/Kubernetese/hpa/README.md
+> Clone this repository and install the metrics server. Please note that this setup is good for a dev/QA environment; a lot more considerations must go into installing a metrics server in a production environment. The official metrics-server repository is kept at https://github.com/kubernetes-incubator/metrics-server and we are using the same repo with a few changes.
+
+> Install the metrics server
+
+` cd metrics-server`
+
+` kubectl create -f . `
+
+
+## Create the nginx deployment, service & HPA
+
+> It is mandatory to set CPU requests on the containers, as the HPA needs CPU metrics to compute utilization.
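+
+> Note: when only `limits` are set, as in `hpa.yml` below, Kubernetes defaults the requests to the limits. To set an explicit CPU request on an existing deployment, one option is, e.g.:
+
+` kubectl set resources deployment nginx --requests=cpu=100m`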
+
+` kubectl create -f hpa.yml`
+
+~~~
+kubectl get hpa
+NAME    REFERENCE          TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
+nginx   Deployment/nginx   0%/40%    3         5         3          55s
+~~~
+
+## Test the HPA using Apache Bench
+
+> Apache Bench (ab) is a load-testing and benchmarking tool for HTTP servers. It can be run from the command line and is very simple to use; a quick load-testing result can be obtained in just one minute.
+
+### Install the Apache Bench tool
+
+` apt-get install apache2-utils`
+
+### Get the service IP address
+
+`kubectl get svc`
+
+### Send load to the pods
+
+` ab -n 500000 -c 1000 http://10.97.161.152/`
+
+` -n requests     Number of requests to perform`
+
+` -c concurrency  Number of multiple requests to make at a time`
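+
+> While `ab` is running, you can watch the autoscaler react from a second terminal (optional):
+
+` kubectl get hpa nginx -w`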
diff --git a/Kubernetese/hpa/hpa.yml b/Kubernetese/hpa/hpa.yml
new file mode 100644
index 0000000..5220b33
--- /dev/null
+++ b/Kubernetese/hpa/hpa.yml
@@ -0,0 +1,55 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      name: nginxpod
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:latest
+        resources:
+          limits:
+            cpu: 20m  ## 20m = 2% of one core; with no explicit requests, requests default to these limits
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx-svc
+spec:
+  type: ClusterIP  ## ClusterIP is the default when no type is given in the service definition
+  selector:
+    app: nginx
+  ports:
+  - protocol: TCP
+    port: 80
+    targetPort: 80
+
+---
+
+apiVersion: autoscaling/v1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: nginx-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: nginx
+  minReplicas: 1
+  maxReplicas: 10
+  targetCPUUtilizationPercentage: 10
diff --git a/Kubernetese/hpa/hpa1.md b/Kubernetese/hpa/hpa1.md
new file mode 100644
index 0000000..06e72f6
--- /dev/null
+++ b/Kubernetese/hpa/hpa1.md
@@ -0,0 +1,453 @@
+Horizontal Pod Autoscaler automatically scales the number of pods
+in a replication controller, deployment, replica set or stateful set based on observed CPU utilization
+(or, with beta support, on some other, application-provided metrics).
+
+This document walks you through an example of enabling Horizontal Pod Autoscaler for the php-apache server. For more information on how Horizontal Pod Autoscaler behaves, see the [Horizontal Pod Autoscaler user guide](/docs/tasks/run-application/horizontal-pod-autoscale/).
+
+This example requires a running Kubernetes cluster and kubectl, version 1.2 or later.
+[metrics-server](https://github.com/kubernetes-incubator/metrics-server/) monitoring needs to be deployed in the cluster
+to provide metrics via the resource metrics API, as Horizontal Pod Autoscaler uses this API to collect metrics. The instructions for deploying it are in the GitHub repository of [metrics-server](https://github.com/kubernetes-incubator/metrics-server/); if you followed the [getting started on GCE guide](/docs/setup/production-environment/turnkey/gce/),
+metrics-server monitoring is turned on by default.
+
+To specify multiple resource metrics for a Horizontal Pod Autoscaler, you must have a Kubernetes cluster
+and kubectl at version 1.6 or later. Furthermore, in order to make use of custom metrics, your cluster
+must be able to communicate with the API server providing the custom metrics API. Finally, to use metrics
+not related to any Kubernetes object, you must have a Kubernetes cluster at version 1.10 or later, and
+you must be able to communicate with the API server that provides the external metrics API.
+See the [Horizontal Pod Autoscaler user guide](/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics) for more details.
+
+## Run & expose php-apache server
+
+To demonstrate Horizontal Pod Autoscaler we will use a custom Docker image based on the php-apache image.
+The Dockerfile has the following content:
+
+```
+FROM php:5-apache
+ADD index.php /var/www/html/index.php
+RUN chmod a+rx index.php
+```
+
+It defines an index.php page which performs some CPU intensive computations:
+
+```
+<?php
+  $x = 0.0001;
+  for ($i = 0; $i <= 1000000; $i++) {
+    $x += sqrt($x);
+  }
+  echo "OK!";
+?>
+```
+
+First, we will start a deployment running the image and expose it as a service:
+
+```shell
+kubectl run php-apache --image=k8s.gcr.io/hpa-example --requests=cpu=200m --limits=cpu=500m --expose --port=80
+```
+```
+service/php-apache created
+deployment.apps/php-apache created
+```
+
+## Create Horizontal Pod Autoscaler
+
+Now that the server is running, we will create the autoscaler using
+[kubectl autoscale](/docs/reference/generated/kubectl/kubectl-commands#autoscale).
+The following command will create a Horizontal Pod Autoscaler that maintains between 1 and 10 replicas of the Pods
+controlled by the php-apache deployment we created in the first step of these instructions.
+Roughly speaking, HPA will increase and decrease the number of replicas
+(via the deployment) to maintain an average CPU utilization across all Pods of 50%
+(since each pod requests 200 milli-cores by [kubectl run](https://github.com/kubernetes/kubernetes/blob/{{< param "githubbranch" >}}/docs/user-guide/kubectl/kubectl_run.md), this means average CPU usage of 100 milli-cores).
+See [here](/docs/tasks/run-application/horizontal-pod-autoscale/#algorithm-details) for more details on the algorithm.
+
+```shell
+kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
+```
+```
+horizontalpodautoscaler.autoscaling/php-apache autoscaled
+```
+
+We can check the current status of the autoscaler by running:
+
+```shell
+kubectl get hpa
+```
+```
+NAME         REFERENCE                     TARGET     MINPODS   MAXPODS   REPLICAS   AGE
+php-apache   Deployment/php-apache/scale   0% / 50%   1         10        1          18s
+```
+
+Please note that the current CPU consumption is 0% as we are not sending any requests to the server
+(the `TARGET` column shows the average across all the pods controlled by the corresponding deployment).
+
+## Increase load
+
+Now, we will see how the autoscaler reacts to increased load.
+We will start a container, and send an infinite loop of queries to the php-apache service (please run it in a different terminal):
+
+```shell
+kubectl run --generator=run-pod/v1 -it --rm load-generator --image=busybox /bin/sh
+
+Hit enter for command prompt
+
+while true; do wget -q -O- http://php-apache.default.svc.cluster.local; done
+```
+
+Within a minute or so, we should see the higher CPU load by executing:

+```shell
+kubectl get hpa
+```
+```
+NAME         REFERENCE                     TARGET       MINPODS   MAXPODS   REPLICAS   AGE
+php-apache   Deployment/php-apache/scale   305% / 50%   1         10        1          3m
+```
+
+Here, CPU consumption has increased to 305% of the request.
+As a result, the deployment was resized to 7 replicas:
+
+```shell
+kubectl get deployment php-apache
+```
+```
+NAME         READY   UP-TO-DATE   AVAILABLE   AGE
+php-apache   7/7     7            7           19m
+```
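+
+Roughly, this matches the autoscaler's scaling rule,
+`desiredReplicas = ceil(currentReplicas * currentMetricValue / desiredMetricValue)`:
+with 1 replica at 305% of requested CPU against a 50% target, `ceil(1 * 305 / 50) = ceil(6.1) = 7`.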
+
+## Note
+It may take a few minutes to stabilize the number of replicas. Since the amount
+of load is not controlled in any way, it may happen that the final number of replicas
+differs from this example.
+
+## Stop load
+
+We will finish our example by stopping the user load.
+
+In the terminal where we created the container with the `busybox` image, terminate
+the load generation by typing `<Ctrl> + C`.
+
+Then we will verify the result state (after a minute or so):
+
+```shell
+kubectl get hpa
+```
+```
+NAME         REFERENCE                     TARGET     MINPODS   MAXPODS   REPLICAS   AGE
+php-apache   Deployment/php-apache/scale   0% / 50%   1         10        1          11m
+```
+
+```shell
+kubectl get deployment php-apache
+```
+```
+NAME         READY   UP-TO-DATE   AVAILABLE   AGE
+php-apache   1/1     1            1           27m
+```
+
+Here CPU utilization dropped to 0, and so HPA autoscaled the number of replicas back down to 1.
+
+## Note
+Autoscaling the replicas may take a few minutes.
+
+
+## Autoscaling on multiple metrics and custom metrics
+
+You can introduce additional metrics to use when autoscaling the `php-apache` Deployment
+by making use of the `autoscaling/v2beta2` API version.
+
+First, get the YAML of your HorizontalPodAutoscaler in the `autoscaling/v2beta2` form:
+
+```shell
+kubectl get hpa.v2beta2.autoscaling -o yaml > /tmp/hpa-v2.yaml
+```
+
+Open the `/tmp/hpa-v2.yaml` file in an editor, and you should see YAML which looks like this:
+
+```yaml
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: php-apache
+  namespace: default
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: php-apache
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 50
+status:
+  observedGeneration: 1
+  lastScaleTime: <unknown>
+  currentReplicas: 1
+  desiredReplicas: 1
+  currentMetrics:
+  - type: Resource
+    resource:
+      name: cpu
+      current:
+        averageUtilization: 0
+        averageValue: 0
+```
+
+Notice that the `targetCPUUtilizationPercentage` field has been replaced with an array called `metrics`.
+The CPU utilization metric is a *resource metric*, since it is represented as a percentage of a resource
+specified on pod containers. Notice that you can specify other resource metrics besides CPU. By default,
+the only other supported resource metric is memory. These resources do not change names from cluster
+to cluster, and should always be available, as long as the `metrics.k8s.io` API is available.
+
+You can also specify resource metrics in terms of direct values, instead of as percentages of the
+requested value, by using a `target` type of `AverageValue` instead of `AverageUtilization`, and
+setting the corresponding `target.averageValue` field instead of the `target.averageUtilization`.
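+
+For instance, a resource metric block targeting a direct average memory value rather than a
+utilization percentage would look like this (a sketch; the value is illustrative):
+
+```yaml
+type: Resource
+resource:
+  name: memory
+  target:
+    type: AverageValue
+    averageValue: 500Mi
+```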
+
+There are two other types of metrics, both of which are considered *custom metrics*: pod metrics and
+object metrics. These metrics may have names which are cluster specific, and require a more
+advanced cluster monitoring setup.
+
+The first of these alternative metric types is *pod metrics*. These metrics describe pods, and
+are averaged together across pods and compared with a target value to determine the replica count.
+They work much like resource metrics, except that they *only* support a `target` type of `AverageValue`.
+
+Pod metrics are specified using a metric block like this:
+
+```yaml
+type: Pods
+pods:
+  metric:
+    name: packets-per-second
+  target:
+    type: AverageValue
+    averageValue: 1k
+```
+
+The second alternative metric type is *object metrics*. These metrics describe a different
+object in the same namespace, instead of describing pods. The metrics are not necessarily
+fetched from the object; they only describe it. Object metrics support `target` types of
+both `Value` and `AverageValue`. With `Value`, the target is compared directly to the returned
+metric from the API. With `AverageValue`, the value returned from the custom metrics API is divided
+by the number of pods before being compared to the target. The following example is the YAML
+representation of the `requests-per-second` metric.
+
+```yaml
+type: Object
+object:
+  metric:
+    name: requests-per-second
+  describedObject:
+    apiVersion: networking.k8s.io/v1beta1
+    kind: Ingress
+    name: main-route
+  target:
+    type: Value
+    value: 2k
+```
+
+If you provide multiple such metric blocks, the HorizontalPodAutoscaler will consider each metric in turn.
+The HorizontalPodAutoscaler will calculate proposed replica counts for each metric, and then choose the
+one with the highest replica count.
+
+For example, if you had your monitoring system collecting metrics about network traffic,
+you could update the definition above using `kubectl edit` to look like this:
+
+```yaml
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: php-apache
+  namespace: default
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: php-apache
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: Resource
+    resource:
+      name: cpu
+      target:
+        type: Utilization
+        averageUtilization: 50
+  - type: Pods
+    pods:
+      metric:
+        name: packets-per-second
+      target:
+        type: AverageValue
+        averageValue: 1k
+  - type: Object
+    object:
+      metric:
+        name: requests-per-second
+      describedObject:
+        apiVersion: networking.k8s.io/v1beta1
+        kind: Ingress
+        name: main-route
+      target:
+        type: Value
+        value: 10k
+status:
+  observedGeneration: 1
+  lastScaleTime: <unknown>
+  currentReplicas: 1
+  desiredReplicas: 1
+  currentMetrics:
+  - type: Resource
+    resource:
+      name: cpu
+      current:
+        averageUtilization: 0
+        averageValue: 0
+  - type: Object
+    object:
+      metric:
+        name: requests-per-second
+      describedObject:
+        apiVersion: networking.k8s.io/v1beta1
+        kind: Ingress
+        name: main-route
+      current:
+        value: 10k
+```
+
+Then, your HorizontalPodAutoscaler would attempt to ensure that each pod was consuming roughly
+50% of its requested CPU, serving 1000 packets per second, and that all pods behind the main-route
+Ingress were serving a total of 10000 requests per second.
+
+### Autoscaling on more specific metrics
+
+Many metrics pipelines allow you to describe metrics either by name or by a set of additional
+descriptors called _labels_. For all non-resource metric types (pod, object, and external,
+described below), you can specify an additional label selector which is passed to your metric
+pipeline. For instance, if you collect a metric `http_requests` with the `verb`
+label, you can specify the following metric block to scale only on GET requests:
+
+```yaml
+type: Object
+object:
+  metric:
+    name: http_requests
+    selector: {matchLabels: {verb: GET}}
+```
+
+This selector uses the same syntax as the full Kubernetes label selectors. The monitoring pipeline
+determines how to collapse multiple series into a single value, if the name and selector
+match multiple series. The selector is additive, and cannot select metrics
+that describe objects that are **not** the target object (the target pods in the case of the `Pods`
+type, and the described object in the case of the `Object` type).
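+
+If a custom-metrics adapter (for example, the Prometheus adapter) is installed in your cluster,
+you can check which custom metrics it actually exposes by querying the aggregated API directly,
+which is a useful sanity check before writing `Pods` or `Object` metric blocks:
+
+```shell
+kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1"
+```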
+
+### Autoscaling on metrics not related to Kubernetes objects
+
+Applications running on Kubernetes may need to autoscale based on metrics that don't have an obvious
+relationship to any object in the Kubernetes cluster, such as metrics describing a hosted service with
+no direct correlation to Kubernetes namespaces. In Kubernetes 1.10 and later, you can address this use case
+with *external metrics*.
+
+Using external metrics requires knowledge of your monitoring system; the setup is
+similar to that required when using custom metrics. External metrics allow you to autoscale your cluster
+based on any metric available in your monitoring system. Just provide a `metric` block with a
+`name` and `selector`, as above, and use the `External` metric type instead of `Object`.
+If multiple time series are matched by the `selector`,
+the sum of their values is used by the HorizontalPodAutoscaler.
+External metrics support both the `Value` and `AverageValue` target types, which function exactly the same
+as when you use the `Object` type.
+
+For example, if your application processes tasks from a hosted queue service, you could add the following
+section to your HorizontalPodAutoscaler manifest to specify that you need one worker per 30 outstanding tasks.
+
+```yaml
+- type: External
+  external:
+    metric:
+      name: queue_messages_ready
+      selector: {matchLabels: {queue: "worker_tasks"}}
+    target:
+      type: AverageValue
+      averageValue: 30
+```
+
+When possible, it's preferable to use the custom metric target types instead of external metrics, since it's
+easier for cluster administrators to secure the custom metrics API. The external metrics API potentially allows
+access to any metric, so cluster administrators should take care when exposing it.
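+
+Put together, a complete HorizontalPodAutoscaler using only this external metric might look like
+the following (a sketch; the `worker-hpa` and `worker` names are illustrative):
+
+```yaml
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: worker-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: worker
+  minReplicas: 1
+  maxReplicas: 10
+  metrics:
+  - type: External
+    external:
+      metric:
+        name: queue_messages_ready
+        selector: {matchLabels: {queue: "worker_tasks"}}
+      target:
+        type: AverageValue
+        averageValue: 30
+```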
+
+## Appendix: Horizontal Pod Autoscaler Status Conditions
+
+When using the `autoscaling/v2beta2` form of the HorizontalPodAutoscaler, you will be able to see
+*status conditions* set by Kubernetes on the HorizontalPodAutoscaler. These status conditions indicate
+whether or not the HorizontalPodAutoscaler is able to scale, and whether or not it is currently restricted
+in any way.
+
+The conditions appear in the `status.conditions` field. To see the conditions affecting a HorizontalPodAutoscaler,
+we can use `kubectl describe hpa`:
+
+```shell
+kubectl describe hpa cm-test
+```
+```shell
+Name:                           cm-test
+Namespace:                      prom
+Labels:                         <none>
+Annotations:                    <none>
+CreationTimestamp:              Fri, 16 Jun 2017 18:09:22 +0000
+Reference:                      ReplicationController/cm-test
+Metrics:                        ( current / target )
+  "http_requests" on pods:      66m / 500m
+Min replicas:                   1
+Max replicas:                   4
+ReplicationController pods:     1 current / 1 desired
+Conditions:
+  Type           Status  Reason              Message
+  ----           ------  ------              -------
+  AbleToScale    True    ReadyForNewScale    the last scale time was sufficiently old as to warrant a new scale
+  ScalingActive  True    ValidMetricFound    the HPA was able to successfully calculate a replica count from pods metric http_requests
+  ScalingLimited False   DesiredWithinRange  the desired replica count is within the acceptable range
+Events:
+```
+
+For this HorizontalPodAutoscaler, we can see several conditions in a healthy state. The first,
+`AbleToScale`, indicates whether or not the HPA is able to fetch and update scales, as well as
+whether or not any backoff-related conditions would prevent scaling. The second, `ScalingActive`,
+indicates whether or not the HPA is enabled (i.e. the replica count of the target is not zero) and
+is able to calculate desired scales. When it is `False`, it generally indicates problems with
+fetching metrics. Finally, the last condition, `ScalingLimited`, indicates that the desired scale
+was capped by the maximum or minimum of the HorizontalPodAutoscaler. This is an indication that
+you may wish to raise or lower the minimum or maximum replica count constraints on your
+HorizontalPodAutoscaler.
+
+## Appendix: Quantities
+
+All metrics in the HorizontalPodAutoscaler and metrics APIs are specified using
+a special whole-number notation known in Kubernetes as a *quantity*. For example,
+the quantity `10500m` would be written as `10.5` in decimal notation. The metrics APIs
+will return whole numbers without a suffix when possible, and will generally return
+quantities in milli-units otherwise. This means you might see your metric value fluctuate
+between `1` and `1500m`, or `1` and `1.5` when written in decimal notation. See the
+[glossary entry on quantities](/docs/reference/glossary?core-object=true#term-quantity) for more information.
+
+## Appendix: Other possible scenarios
+
+### Creating the autoscaler declaratively
+
+Instead of using the `kubectl autoscale` command to create a HorizontalPodAutoscaler imperatively, we
+can use the following file to create it declaratively:
+
+{{< codenew file="application/hpa/php-apache.yaml" >}}
+
+We will create the autoscaler by executing the following command:
+
+```shell
+kubectl create -f https://k8s.io/examples/application/hpa/php-apache.yaml
+```
+```
+horizontalpodautoscaler.autoscaling/php-apache created
+```
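+
+The referenced `php-apache.yaml` is not reproduced here; a minimal `autoscaling/v1` manifest
+equivalent to the earlier `kubectl autoscale` command would look roughly like this:
+
+```yaml
+apiVersion: autoscaling/v1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: php-apache
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: php-apache
+  minReplicas: 1
+  maxReplicas: 10
+  targetCPUUtilizationPercentage: 50
+```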
diff --git a/Kubernetese/hpa/metrics-server/aggregated-metrics-reader.yaml b/Kubernetese/hpa/metrics-server/aggregated-metrics-reader.yaml
new file mode 100644
index 0000000..0a0e159
--- /dev/null
+++ b/Kubernetese/hpa/metrics-server/aggregated-metrics-reader.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: system:aggregated-metrics-reader
+  labels:
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+rules:
+- apiGroups: ["metrics.k8s.io"]
+  resources: ["pods", "nodes"]
+  verbs: ["get", "list", "watch"]
diff --git a/Kubernetese/hpa/metrics-server/auth-delegator.yaml b/Kubernetese/hpa/metrics-server/auth-delegator.yaml
new file mode 100644
index 0000000..87909da
--- /dev/null
+++ b/Kubernetese/hpa/metrics-server/auth-delegator.yaml
@@ -0,0 +1,13 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: metrics-server:system:auth-delegator
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:auth-delegator
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system
diff --git a/Kubernetese/hpa/metrics-server/auth-reader.yaml b/Kubernetese/hpa/metrics-server/auth-reader.yaml
new file mode 100644
index 0000000..062afa8
--- /dev/null
+++ b/Kubernetese/hpa/metrics-server/auth-reader.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: metrics-server-auth-reader
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: extension-apiserver-authentication-reader
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system
diff --git a/Kubernetese/hpa/metrics-server/metrics-apiservice.yaml b/Kubernetese/hpa/metrics-server/metrics-apiservice.yaml
new file mode 100644
index 0000000..08b0530
--- /dev/null
+++ b/Kubernetese/hpa/metrics-server/metrics-apiservice.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: apiregistration.k8s.io/v1beta1
+kind: APIService
+metadata:
+  name: v1beta1.metrics.k8s.io
+spec:
+  service:
+    name: metrics-server
+    namespace: kube-system
+  group: metrics.k8s.io
+  version: v1beta1
+  insecureSkipTLSVerify: true
+  groupPriorityMinimum: 100
+  versionPriority: 100
diff --git a/Kubernetese/hpa/metrics-server/metrics-server-deployment.yaml b/Kubernetese/hpa/metrics-server/metrics-server-deployment.yaml
new file mode 100644
index 0000000..debcdbb
--- /dev/null
+++ b/Kubernetese/hpa/metrics-server/metrics-server-deployment.yaml
@@ -0,0 +1,41 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: metrics-server
+  namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: metrics-server
+  namespace: kube-system
+  labels:
+    k8s-app: metrics-server
+spec:
+  selector:
+    matchLabels:
+      k8s-app: metrics-server
+  template:
+    metadata:
+      name: metrics-server
+      labels:
+        k8s-app: metrics-server
+    spec:
+      serviceAccountName: metrics-server
+      volumes:
+      # mount in tmp so we can safely use from-scratch images and/or read-only containers
+      - name: tmp-dir
+        emptyDir: {}
+      containers:
+      - name: metrics-server
+        image: k8s.gcr.io/metrics-server-amd64:v0.3.6
+        args:
+          - /metrics-server
+          # accept the kubelet's self-signed serving certificate (fine for dev/QA)
+          - --kubelet-insecure-tls
+          # reach kubelets by node InternalIP rather than by hostname
+          - --kubelet-preferred-address-types=InternalIP
+        imagePullPolicy: Always
+        volumeMounts:
+        - name: tmp-dir
+          mountPath: /tmp
+      hostNetwork: true
diff --git a/Kubernetese/hpa/metrics-server/metrics-server-service.yaml b/Kubernetese/hpa/metrics-server/metrics-server-service.yaml
new file mode 100644
index 0000000..ddf6f4a
--- /dev/null
+++ b/Kubernetese/hpa/metrics-server/metrics-server-service.yaml
@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: metrics-server
+  namespace: kube-system
+  labels:
+    kubernetes.io/name: "Metrics-server"
+    kubernetes.io/cluster-service: "true"
+spec:
+  selector:
+    k8s-app: metrics-server
+  ports:
+  - port: 443
+    protocol: TCP
+    targetPort: 443
diff --git a/Kubernetese/hpa/metrics-server/resource-reader.yaml b/Kubernetese/hpa/metrics-server/resource-reader.yaml
new file mode 100644
index 0000000..52cf808
--- /dev/null
+++ b/Kubernetese/hpa/metrics-server/resource-reader.yaml
@@ -0,0 +1,31 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: system:metrics-server
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - nodes
+  - nodes/stats
+  - namespaces
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: system:metrics-server
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:metrics-server
+subjects:
+- kind: ServiceAccount
+  name: metrics-server
+  namespace: kube-system
diff --git a/Kubernetese/hpa/metrics-server/rm-metrics-server.sh b/Kubernetese/hpa/metrics-server/rm-metrics-server.sh
new file mode 100644
index 0000000..e8380cc
--- /dev/null
+++ b/Kubernetese/hpa/metrics-server/rm-metrics-server.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+kubectl -n kube-system delete service/metrics-server
+kubectl -n kube-system delete deployment.apps/metrics-server
+kubectl -n kube-system delete serviceaccount/metrics-server
+kubectl -n kube-system delete rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader
+kubectl delete apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io
+kubectl delete clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader
+kubectl delete clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator
+kubectl delete clusterrole.rbac.authorization.k8s.io/system:metrics-server
+kubectl delete clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server
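+
+# After running this script, you can confirm the removal with, e.g.:
+#   kubectl get apiservice v1beta1.metrics.k8s.io
+# which should then report NotFound.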