From 7811eab9e4256397319d3f84a58035c417eb561d Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 2 Dec 2020 16:16:02 -0800 Subject: [PATCH 001/175] adding SA changes --- kubernetes/linux/main.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index a2ba6a1d1..2ff5f7c06 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -19,6 +19,20 @@ mkdir -p /var/opt/microsoft/docker-cimprov/state #sudo setfacl -m user:omsagent:rw /var/run/host/docker.sock #fi +#Setting Application Insights key and endpoint +if [ ! -z "$AI_KEY_URL" ]; then + echo "Getting AI key from Storage account" + aiIkey=$(curl --max-time 10 $AI_KEY_URL) + echo "export APPLICATIONINSIGHTS_AUTH=$aiIkey" >> ~/.bashrc +fi +if [ ! -z "$AI_ENDPOINT_URL" ]; then + echo "Getting AI endpoint from Storage account" + aiEndpoint=$(curl --max-time 10 $AI_ENDPOINT_URL) + echo "export APPLICATIONINSIGHTS_ENDPOINT=$aiEndpoint" >> ~/.bashrc +fi +source ~/.bashrc + + # add permissions for omsagent user to access azure.json. 
sudo setfacl -m user:omsagent:r /etc/kubernetes/host/azure.json From 2a2e62e1619ee9ae2ca10115dfd897dcce8e2990 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 5 Jan 2021 16:48:15 -0800 Subject: [PATCH 002/175] changes for new container --- kubernetes/omsagent.yaml | 132 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 2155361e9..897d72cbe 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -471,6 +471,138 @@ spec: secret: secretName: omsagent-adx-secret optional: true + - name: omsagent-prometheus + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 250m + memory: 600Mi + requests: + cpu: 75m + memory: 225Mi + env: + # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these + - name: AKS_RESOURCE_ID + value: "VALUE_AKS_RESOURCE_ID_VALUE" + - name: AKS_REGION + value: "VALUE_AKS_RESOURCE_REGION_VALUE" + #Uncomment below two lines for ACS clusters and set the cluster names manually. 
Also comment out the above two lines for ACS clusters + #- name: ACS_RESOURCE_NAME + # value: "my_acs_cluster_name" + - name: CONTROLLER_TYPE + value: "DaemonSet-Prometheus" + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + # Update this with the user assigned msi client id for omsagent + - name: USER_ASSIGNED_IDENTITY_CLIENT_ID + value: "" + - name: AZMON_CONTAINERLOGS_ONEAGENT_REGIONS + value: "koreacentral,norwayeast" + securityContext: + privileged: true + ports: + - containerPort: 25225 + protocol: TCP + - containerPort: 25224 + protocol: UDP + volumeMounts: + - mountPath: /hostfs + name: host-root + readOnly: true + - mountPath: /var/run/host + name: docker-sock + - mountPath: /var/log + name: host-log + - mountPath: /var/lib/docker/containers + name: containerlog-path + readOnly: true + - mountPath: /mnt/docker + name: containerlog-path-2 + readOnly: true + - mountPath: /mnt/containers + name: containerlog-path-3 + readOnly: true + - mountPath: /etc/kubernetes/host + name: azure-json-path + - mountPath: /etc/omsagent-secret + name: omsagent-secret + readOnly: true + - mountPath: /etc/config/settings + name: settings-vol-config + readOnly: true + - mountPath: /etc/config/settings/adx + name: omsagent-adx-secret + readOnly: true + #livenessProbe: + #exec: + #command: + #- /bin/bash + #- -c + #- /opt/livenessprobe.sh + #initialDelaySeconds: 60 + #periodSeconds: 60 + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - labelSelector: + matchExpressions: + # kubernetes.io/os label doesnt exist in k8s versions < 1.14 so make sure to choose label based on k8s version in aks yaml + - key: kubernetes.io/os + operator: In + values: + - linux + - key: type + operator: NotIn + values: + - virtual-kubelet + # Tolerate a NoSchedule taint on master that ACS Engine sets. 
+ tolerations: + - operator: "Exists" + effect: "NoSchedule" + - operator: "Exists" + effect: "NoExecute" + - operator: "Exists" + effect: "PreferNoSchedule" + volumes: + - name: host-root + hostPath: + path: / + - name: docker-sock + hostPath: + path: /var/run + - name: container-hostname + hostPath: + path: /etc/hostname + - name: host-log + hostPath: + path: /var/log + - name: containerlog-path + hostPath: + path: /var/lib/docker/containers + - name: containerlog-path-2 + hostPath: + path: /mnt/docker + - name: containerlog-path-3 + hostPath: + path: /mnt/containers + - name: azure-json-path + hostPath: + path: /etc/kubernetes + - name: omsagent-secret + secret: + secretName: omsagent-secret + - name: settings-vol-config + configMap: + name: container-azm-ms-agentconfig + optional: true + - name: omsagent-adx-secret + secret: + secretName: omsagent-adx-secret + optional: true --- apiVersion: apps/v1 kind: Deployment From d52c92136a9d792463b74100fd66749af5d96f43 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 5 Jan 2021 16:49:31 -0800 Subject: [PATCH 003/175] reverting SA changes from this branch --- kubernetes/linux/main.sh | 30 ++++++++---------------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 2ff5f7c06..ed16d3e32 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -19,20 +19,6 @@ mkdir -p /var/opt/microsoft/docker-cimprov/state #sudo setfacl -m user:omsagent:rw /var/run/host/docker.sock #fi -#Setting Application Insights key and endpoint -if [ ! -z "$AI_KEY_URL" ]; then - echo "Getting AI key from Storage account" - aiIkey=$(curl --max-time 10 $AI_KEY_URL) - echo "export APPLICATIONINSIGHTS_AUTH=$aiIkey" >> ~/.bashrc -fi -if [ ! 
-z "$AI_ENDPOINT_URL" ]; then - echo "Getting AI endpoint from Storage account" - aiEndpoint=$(curl --max-time 10 $AI_ENDPOINT_URL) - echo "export APPLICATIONINSIGHTS_ENDPOINT=$aiEndpoint" >> ~/.bashrc -fi -source ~/.bashrc - - # add permissions for omsagent user to access azure.json. sudo setfacl -m user:omsagent:r /etc/kubernetes/host/azure.json @@ -185,14 +171,14 @@ done source config_env_var -#Parse the configmap to set the right environment variables for health feature. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-health-config.rb +#Parse the configmap to set the right environment variables for agent config. +/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb -cat health_config_env_var | while read line; do +cat agent_config_env_var | while read line; do #echo $line echo $line >> ~/.bashrc done -source health_config_env_var +source agent_config_env_var #Parse the configmap to set the right environment variables for network policy manager (npm) integration. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb @@ -443,7 +429,7 @@ echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc #region check to auto-activate oneagent, to route container logs, #Intent is to activate one agent routing for all managed clusters with region in the regionllist, unless overridden by configmap -# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map +# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map # AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE will have the final route that we compute & set, based on our region list logic echo "************start oneagent log routing checks************" # by default, use configmap route for safer side @@ -476,9 +462,9 @@ else echo "current region is not in oneagent regions..." 
fi -if [ "$isoneagentregion" = true ]; then +if [ "$isoneagentregion" = true ]; then #if configmap has a routing for logs, but current region is in the oneagent region list, take the configmap route - if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then + if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE echo "oneagent region is true for current region:$currentregion and config map logs route is not empty. so using config map logs route as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" else #there is no configmap route, so route thru oneagent @@ -525,7 +511,7 @@ if [ ! -e "/etc/config/kube.conf" ]; then echo "starting mdsd ..." mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & - + touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 fi fi From 32c1da07b47b135da494008128555b9aabf1e797 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 5 Jan 2021 19:25:22 -0800 Subject: [PATCH 004/175] adding container at the right place --- kubernetes/omsagent.yaml | 59 ---------------------------------------- 1 file changed, 59 deletions(-) diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 897d72cbe..a5a8592a9 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -412,65 +412,6 @@ spec: - /opt/livenessprobe.sh initialDelaySeconds: 60 periodSeconds: 60 - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - labelSelector: - matchExpressions: - # kubernetes.io/os label doesnt exist in k8s versions < 1.14 so make sure to choose label based on k8s version in aks yaml - - key: kubernetes.io/os - operator: In - values: - - linux - - key: type - operator: NotIn - values: - - virtual-kubelet - # Tolerate a NoSchedule taint on master that ACS Engine sets. 
- tolerations: - - operator: "Exists" - effect: "NoSchedule" - - operator: "Exists" - effect: "NoExecute" - - operator: "Exists" - effect: "PreferNoSchedule" - volumes: - - name: host-root - hostPath: - path: / - - name: docker-sock - hostPath: - path: /var/run - - name: container-hostname - hostPath: - path: /etc/hostname - - name: host-log - hostPath: - path: /var/log - - name: containerlog-path - hostPath: - path: /var/lib/docker/containers - - name: containerlog-path-2 - hostPath: - path: /mnt/docker - - name: containerlog-path-3 - hostPath: - path: /mnt/containers - - name: azure-json-path - hostPath: - path: /etc/kubernetes - - name: omsagent-secret - secret: - secretName: omsagent-secret - - name: settings-vol-config - configMap: - name: container-azm-ms-agentconfig - optional: true - - name: omsagent-adx-secret - secret: - secretName: omsagent-adx-secret - optional: true - name: omsagent-prometheus image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" imagePullPolicy: IfNotPresent From 28374fbcaefbaf2380d927702a38ca5b868ea01e Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 11 Jan 2021 19:35:41 -0800 Subject: [PATCH 005/175] bootstrap sidecar container changes --- .../installer/conf/prometheus-side-car.conf | 12 ++++++++ .../installer/datafiles/base_container.data | 1 + kubernetes/linux/main.sh | 29 ++++++++++++------- kubernetes/omsagent.yaml | 4 +-- 4 files changed, 33 insertions(+), 13 deletions(-) create mode 100644 build/linux/installer/conf/prometheus-side-car.conf diff --git a/build/linux/installer/conf/prometheus-side-car.conf b/build/linux/installer/conf/prometheus-side-car.conf new file mode 100644 index 000000000..7073c43d7 --- /dev/null +++ b/build/linux/installer/conf/prometheus-side-car.conf @@ -0,0 +1,12 @@ +# Fluentd config file for OMS Docker - container components (non kubeAPI) + +# Forward port 25225 for container logs + + type forward + port 25225 + bind 127.0.0.1 + + + + + diff --git 
a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index ca2538b79..cb2ebf066 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -110,6 +110,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/tomlrb/version.rb; source/toml-parser/tomlrb/version.rb; 644; root; root /opt/td-agent-bit/bin/out_oms.so; intermediate/${{BUILD_CONFIGURATION}}/out_oms.so; 755; root; root +/etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf; build/linux/installer/conf/prometheus-side-car.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; build/linux/installer/conf/td-agent-bit.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf; build/linux/installer/conf/td-agent-bit-rs.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf; build/linux/installer/conf/azm-containers-parser.conf; 644; root; root diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index ed16d3e32..7cc704565 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -2,6 +2,8 @@ if [ -e "/etc/config/kube.conf" ]; then cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf +else if [[ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]]; then + cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf else sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf fi @@ -520,18 +522,23 @@ echo "************end oneagent log routing checks************" #telegraf & fluentbit requirements if [ ! 
-e "/etc/config/kube.conf" ]; then - if [ "$CONTAINER_RUNTIME" == "docker" ]; then - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" - else - echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" - sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + if [[ -z "${CONTAINER_TYPE}" ]]; then + if [ "$CONTAINER_RUNTIME" == "docker" ]; then + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + else + echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" + sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + fi + else if [[ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]]; then + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" fi -else - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" +# else +# /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e 
/opt/td-agent-bit/bin/out_oms.so & +# telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" fi #set env vars used by telegraf diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index a5a8592a9..1021df98c 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -431,8 +431,8 @@ spec: #Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters #- name: ACS_RESOURCE_NAME # value: "my_acs_cluster_name" - - name: CONTROLLER_TYPE - value: "DaemonSet-Prometheus" + - name: CONTAINER_TYPE + value: "Prometheus-Sidecar" - name: NODE_IP valueFrom: fieldRef: From e462a98d6a075f4a173cc612bef91d72c8384e22 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 12 Jan 2021 14:08:23 -0800 Subject: [PATCH 006/175] fixing script --- kubernetes/linux/main.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index b03a69fc2..7963a6c7d 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -2,7 +2,7 @@ if [ -e "/etc/config/kube.conf" ]; then cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf -else if [[ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]]; then +elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf else sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf @@ -523,7 +523,7 @@ echo "************end oneagent log routing checks************" #telegraf & fluentbit requirements if [ ! 
-e "/etc/config/kube.conf" ]; then - if [[ -z "${CONTAINER_TYPE}" ]]; then + if [ -z "${CONTAINER_TYPE}" ]; then if [ "$CONTAINER_RUNTIME" == "docker" ]; then /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" @@ -533,7 +533,7 @@ if [ ! -e "/etc/config/kube.conf" ]; then /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" fi - else if [[ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]]; then + elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" fi From 148a74bd9bff616a2079cdf9f97f8b0c11341d38 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 12 Jan 2021 18:15:36 -0800 Subject: [PATCH 007/175] more changes --- .../linux/installer/scripts/livenessprobe.sh | 14 ++++++----- kubernetes/linux/main.sh | 23 +++++++++++-------- kubernetes/omsagent.yaml | 16 ++++++------- 3 files changed, 30 insertions(+), 23 deletions(-) diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index e3f9fb475..32fca8a2c 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -18,12 +18,14 @@ if [ -e "/opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2" ]; then fi fi -#test to exit non zero value if fluentbit is not running -(ps -ef | grep td-agent-bit | grep -v "grep") -if [ $? -ne 0 ] -then - echo "Fluentbit is not running" > /dev/termination-log - exit 1 +#test to exit non zero value if fluentbit is not running in daemonset +if [ ! 
-e "/etc/config/kube.conf" ]; then + (ps -ef | grep td-agent-bit | grep -v "grep") + if [ $? -ne 0 ] + then + echo "Fluentbit is not running" > /dev/termination-log + exit 1 + fi fi if [ ! -s "inotifyoutput.txt" ] diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 7963a6c7d..0b5ad5d39 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -1,10 +1,13 @@ #!/bin/bash if [ -e "/etc/config/kube.conf" ]; then + echo "rashmi-in-rs-omsagent-conf" cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + echo "rashmi-in-ds-prom-omsagent-conf" cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf else + echo "rashmi-in-ds-omsagent-conf" sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf fi sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf @@ -223,15 +226,17 @@ fi #Setting default environment variables to be used in any case of failure in the above steps if [ ! 
-e "/etc/config/kube.conf" ]; then - cat defaultpromenvvariables | while read line; do - echo $line >> ~/.bashrc - done - source defaultpromenvvariables -else - cat defaultpromenvvariables-rs | while read line; do - echo $line >> ~/.bashrc - done - source defaultpromenvvariables-rs + if [ -z "${CONTAINER_TYPE}" ]; then + cat defaultpromenvvariables | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables + elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + cat defaultpromenvvariables-rs | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables-rs + fi fi #Sourcing telemetry environment variable file if it exists diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index aa844262f..e06394996 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -505,14 +505,14 @@ spec: - mountPath: /etc/config/settings/adx name: omsagent-adx-secret readOnly: true - #livenessProbe: - #exec: - #command: - #- /bin/bash - #- -c - #- /opt/livenessprobe.sh - #initialDelaySeconds: 60 - #periodSeconds: 60 + livenessProbe: + exec: + command: + - /bin/bash + - -c + - /opt/livenessprobe.sh + initialDelaySeconds: 60 + periodSeconds: 60 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: From 09c889c26c8b48630fd12ab6bdd30f2bc5f4873b Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 12 Jan 2021 18:34:46 -0800 Subject: [PATCH 008/175] fix --- .../scripts/tomlparser-npm-config.rb | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-npm-config.rb b/build/linux/installer/scripts/tomlparser-npm-config.rb index 777fef209..aa4a6430d 100644 --- a/build/linux/installer/scripts/tomlparser-npm-config.rb +++ b/build/linux/installer/scripts/tomlparser-npm-config.rb @@ -16,11 +16,12 @@ @collect_advanced_npm_metrics = false @npm_default_setting = "[]" @npm_node_urls = 
"[\"http://$NODE_IP:10091/node-metrics\"]" -@npm_cluster_urls="[\"http://npm-metrics-cluster-service.kube-system:9000/cluster-metrics\"]" +@npm_cluster_urls = "[\"http://npm-metrics-cluster-service.kube-system:9000/cluster-metrics\"]" @npm_basic_drop_metrics_cluster = "[\"npm_ipset_counts\"]" @tgfConfigFileDS = "/etc/opt/microsoft/docker-cimprov/telegraf.conf" @tgfConfigFileRS = "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" @replicaset = "replicaset" +@promSideCar = "prometheus-sidecar" # Use parser to parse the configmap toml file to a ruby structure def parseConfigMap @@ -45,14 +46,14 @@ def parseConfigMap def populateSettingValuesFromConfigMap(parsedConfig) begin if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].nil? - advanced_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].to_s - puts "config::npm::got:integrations.azure_network_policy_manager.collect_advanced_metrics='#{advanced_npm_metrics}'" - if !advanced_npm_metrics.nil? && advanced_npm_metrics.strip.casecmp("true") == 0 - @collect_advanced_npm_metrics = true - else - @collect_advanced_npm_metrics = false - end - puts "config::npm::set:integrations.azure_network_policy_manager.collect_advanced_metrics=#{@collect_advanced_npm_metrics}" + advanced_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].to_s + puts "config::npm::got:integrations.azure_network_policy_manager.collect_advanced_metrics='#{advanced_npm_metrics}'" + if !advanced_npm_metrics.nil? 
&& advanced_npm_metrics.strip.casecmp("true") == 0 + @collect_advanced_npm_metrics = true + else + @collect_advanced_npm_metrics = false + end + puts "config::npm::set:integrations.azure_network_policy_manager.collect_advanced_metrics=#{@collect_advanced_npm_metrics}" end rescue => errorStr puts "config::npm::error:Exception while reading config settings for npm advanced setting - #{errorStr}, using defaults" @@ -60,14 +61,14 @@ def populateSettingValuesFromConfigMap(parsedConfig) end begin if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].nil? - basic_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].to_s - puts "config::npm::got:integrations.azure_network_policy_manager.collect_basic_metrics='#{basic_npm_metrics}'" - if !basic_npm_metrics.nil? && basic_npm_metrics.strip.casecmp("true") == 0 - @collect_basic_npm_metrics = true - else - @collect_basic_npm_metrics = false - end - puts "config::npm::set:integrations.azure_network_policy_manager.collect_basic_metrics=#{@collect_basic_npm_metrics}" + basic_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].to_s + puts "config::npm::got:integrations.azure_network_policy_manager.collect_basic_metrics='#{basic_npm_metrics}'" + if !basic_npm_metrics.nil? 
&& basic_npm_metrics.strip.casecmp("true") == 0 + @collect_basic_npm_metrics = true + else + @collect_basic_npm_metrics = false + end + puts "config::npm::set:integrations.azure_network_policy_manager.collect_basic_metrics=#{@collect_basic_npm_metrics}" end rescue => errorStr puts "config::npm::error:Exception while reading config settings for npm basic setting - #{errorStr}, using defaults" @@ -90,12 +91,11 @@ def populateSettingValuesFromConfigMap(parsedConfig) @collect_advanced_npm_metrics = false end - - controller = ENV["CONTROLLER_TYPE"] +container_type = ENV["CONTAINER_TYPE"] tgfConfigFile = @tgfConfigFileDS -if controller.casecmp(@replicaset) == 0 +if ((controller.casecmp(@replicaset) == 0) || (container_type.casecmp(@promSideCar) == 0)) tgfConfigFile = @tgfConfigFileRS end @@ -123,7 +123,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) telemetryFile = File.open("integration_npm_config_env_var", "w") if !telemetryFile.nil? - if @collect_advanced_npm_metrics == true + if @collect_advanced_npm_metrics == true telemetryFile.write("export TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED=1\n") elsif @collect_basic_npm_metrics == true telemetryFile.write("export TELEMETRY_NPM_INTEGRATION_METRICS_BASIC=1\n") From 039116951a3972795ed761f3508767751800838b Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 14 Jan 2021 14:26:49 -0800 Subject: [PATCH 009/175] redoing a bit --- .../linux/installer/scripts/livenessprobe.sh | 14 +- kubernetes/linux/main - Copy.sh | 623 ++++++++++++++++++ kubernetes/linux/main.sh | 48 +- kubernetes/omsagent.yaml | 16 +- 4 files changed, 657 insertions(+), 44 deletions(-) create mode 100644 kubernetes/linux/main - Copy.sh diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 32fca8a2c..e3f9fb475 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -18,14 +18,12 @@ if [ -e 
"/opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2" ]; then fi fi -#test to exit non zero value if fluentbit is not running in daemonset -if [ ! -e "/etc/config/kube.conf" ]; then - (ps -ef | grep td-agent-bit | grep -v "grep") - if [ $? -ne 0 ] - then - echo "Fluentbit is not running" > /dev/termination-log - exit 1 - fi +#test to exit non zero value if fluentbit is not running +(ps -ef | grep td-agent-bit | grep -v "grep") +if [ $? -ne 0 ] +then + echo "Fluentbit is not running" > /dev/termination-log + exit 1 fi if [ ! -s "inotifyoutput.txt" ] diff --git a/kubernetes/linux/main - Copy.sh b/kubernetes/linux/main - Copy.sh new file mode 100644 index 000000000..0b5ad5d39 --- /dev/null +++ b/kubernetes/linux/main - Copy.sh @@ -0,0 +1,623 @@ +#!/bin/bash + +if [ -e "/etc/config/kube.conf" ]; then + echo "rashmi-in-rs-omsagent-conf" + cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf +elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + echo "rashmi-in-ds-prom-omsagent-conf" + cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf +else + echo "rashmi-in-ds-omsagent-conf" + sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf +fi +sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf +sed -i -e 's/^exit 101$/exit 0/g' /usr/sbin/policy-rc.d + +#Using the get_hostname for hostname instead of the host field in syslog messages +sed -i.bak "s/record\[\"Host\"\] = hostname/record\[\"Host\"\] = OMS::Common.get_hostname/" /opt/microsoft/omsagent/plugin/filter_syslog.rb + +#using /var/opt/microsoft/docker-cimprov/state instead of /var/opt/microsoft/omsagent/state since the latter gets deleted during onboarding +mkdir -p /var/opt/microsoft/docker-cimprov/state + +#if [ ! 
-e "/etc/config/kube.conf" ]; then + # add permissions for omsagent user to access docker.sock + #sudo setfacl -m user:omsagent:rw /var/run/host/docker.sock +#fi + +# add permissions for omsagent user to access azure.json. +sudo setfacl -m user:omsagent:r /etc/kubernetes/host/azure.json + +# add permission for omsagent user to log folder. We also need 'x', else log rotation is failing. TODO: Investigate why. +sudo setfacl -m user:omsagent:rwx /var/opt/microsoft/docker-cimprov/log + +#Run inotify as a daemon to track changes to the mounted configmap. +inotifywait /etc/config/settings --daemon --recursive --outfile "/opt/inotifyoutput.txt" --event create,delete --format '%e : %T' --timefmt '+%s' + +#resourceid override for loganalytics data. +if [ -z $AKS_RESOURCE_ID ]; then + echo "not setting customResourceId" +else + export customResourceId=$AKS_RESOURCE_ID + echo "export customResourceId=$AKS_RESOURCE_ID" >> ~/.bashrc + source ~/.bashrc + echo "customResourceId:$customResourceId" +fi + +#set agent config schema version +if [ -e "/etc/config/settings/schema-version" ] && [ -s "/etc/config/settings/schema-version" ]; then + #trim + config_schema_version="$(cat /etc/config/settings/schema-version | xargs)" + #remove all spaces + config_schema_version="${config_schema_version//[[:space:]]/}" + #take first 10 characters + config_schema_version="$(echo $config_schema_version| cut -c1-10)" + + export AZMON_AGENT_CFG_SCHEMA_VERSION=$config_schema_version + echo "export AZMON_AGENT_CFG_SCHEMA_VERSION=$config_schema_version" >> ~/.bashrc + source ~/.bashrc + echo "AZMON_AGENT_CFG_SCHEMA_VERSION:$AZMON_AGENT_CFG_SCHEMA_VERSION" +fi + +#set agent config file version +if [ -e "/etc/config/settings/config-version" ] && [ -s "/etc/config/settings/config-version" ]; then + #trim + config_file_version="$(cat /etc/config/settings/config-version | xargs)" + #remove all spaces + config_file_version="${config_file_version//[[:space:]]/}" + #take first 10 characters + 
config_file_version="$(echo $config_file_version| cut -c1-10)" + + export AZMON_AGENT_CFG_FILE_VERSION=$config_file_version + echo "export AZMON_AGENT_CFG_FILE_VERSION=$config_file_version" >> ~/.bashrc + source ~/.bashrc + echo "AZMON_AGENT_CFG_FILE_VERSION:$AZMON_AGENT_CFG_FILE_VERSION" +fi + +export PROXY_ENDPOINT="" + +# Check for internet connectivity or workspace deletion +if [ -e "/etc/omsagent-secret/WSID" ]; then + workspaceId=$(cat /etc/omsagent-secret/WSID) + if [ -e "/etc/omsagent-secret/DOMAIN" ]; then + domain=$(cat /etc/omsagent-secret/DOMAIN) + else + domain="opinsights.azure.com" + fi + + if [ -e "/etc/omsagent-secret/PROXY" ]; then + export PROXY_ENDPOINT=$(cat /etc/omsagent-secret/PROXY) + # Validate Proxy Endpoint URL + # extract the protocol:// + proto="$(echo $PROXY_ENDPOINT | grep :// | sed -e's,^\(.*://\).*,\1,g')" + # convert the protocol prefix in lowercase for validation + proxyprotocol=$(echo $proto | tr "[:upper:]" "[:lower:]") + if [ "$proxyprotocol" != "http://" -a "$proxyprotocol" != "https://" ]; then + echo "-e error proxy endpoint should be in this format http(s)://:@:" + fi + # remove the protocol + url="$(echo ${PROXY_ENDPOINT/$proto/})" + # extract the creds + creds="$(echo $url | grep @ | cut -d@ -f1)" + user="$(echo $creds | cut -d':' -f1)" + pwd="$(echo $creds | cut -d':' -f2)" + # extract the host and port + hostport="$(echo ${url/$creds@/} | cut -d/ -f1)" + # extract host without port + host="$(echo $hostport | sed -e 's,:.*,,g')" + # extract the port + port="$(echo $hostport | sed -e 's,^.*:,:,g' -e 's,.*:\([0-9]*\).*,\1,g' -e 's,[^0-9],,g')" + + if [ -z "$user" -o -z "$pwd" -o -z "$host" -o -z "$port" ]; then + echo "-e error proxy endpoint should be in this format http(s)://:@:" + else + echo "successfully validated provided proxy endpoint is valid and expected format" + fi + fi + + if [ ! 
-z "$PROXY_ENDPOINT" ]; then + echo "Making curl request to oms endpint with domain: $domain and proxy: $PROXY_ENDPOINT" + curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT + else + echo "Making curl request to oms endpint with domain: $domain" + curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest + fi + + if [ $? -ne 0 ]; then + if [ ! -z "$PROXY_ENDPOINT" ]; then + echo "Making curl request to ifconfig.co with proxy: $PROXY_ENDPOINT" + RET=`curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co --proxy $PROXY_ENDPOINT` + else + echo "Making curl request to ifconfig.co" + RET=`curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co` + fi + if [ $RET -eq 000 ]; then + echo "-e error Error resolving host during the onboarding request. Check the internet connectivity and/or network policy on the cluster" + else + # Retrying here to work around network timing issue + if [ ! -z "$PROXY_ENDPOINT" ]; then + echo "ifconfig check succeeded, retrying oms endpoint with proxy..." + curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT + else + echo "ifconfig check succeeded, retrying oms endpoint..." + curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest + fi + + if [ $? -ne 0 ]; then + echo "-e error Error resolving host during the onboarding request. Workspace might be deleted." + else + echo "curl request to oms endpoint succeeded with retry." + fi + fi + else + echo "curl request to oms endpoint succeeded." + fi +else + echo "LA Onboarding:Workspace Id not mounted, skipping the telemetry check" +fi + +# Set environment variable for if public cloud by checking the workspace domain. 
+if [ -z $domain ]; then
+  CLOUD_ENVIRONMENT="unknown"
+elif [ $domain == "opinsights.azure.com" ]; then
+  CLOUD_ENVIRONMENT="public"
+else
+  CLOUD_ENVIRONMENT="national"
+fi
+export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT
+echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc
+
+#Parse the configmap to set the right environment variables.
+/opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb
+
+cat config_env_var | while read line; do
+      #echo $line
+      echo $line >> ~/.bashrc
+done
+source config_env_var
+
+
+#Parse the configmap to set the right environment variables for agent config.
+#Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings
+/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb
+
+cat agent_config_env_var | while read line; do
+      #echo $line
+      echo $line >> ~/.bashrc
+done
+source agent_config_env_var
+
+#Parse the configmap to set the right environment variables for network policy manager (npm) integration.
+/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb
+
+cat integration_npm_config_env_var | while read line; do
+      #echo $line
+      echo $line >> ~/.bashrc
+done
+source integration_npm_config_env_var
+
+#Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset
+if [ ! -e "/etc/config/kube.conf" ]; then
+   /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb
+fi
+
+#Parse the prometheus configmap to create a file with new custom settings.
+/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb
+
+#If config parsing was successful, a copy of the conf file with replaced custom settings file is created
+if [ ! -e "/etc/config/kube.conf" ]; then
+   if [ -e "/opt/telegraf-test.conf" ]; then
+      echo "****************Start Telegraf in Test Mode**************************"
+      /opt/telegraf --config /opt/telegraf-test.conf -test
+      if [ $? 
-eq 0 ]; then + mv "/opt/telegraf-test.conf" "/etc/opt/microsoft/docker-cimprov/telegraf.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + fi +else + if [ -e "/opt/telegraf-test-rs.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test-rs.conf -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + fi +fi + +#Setting default environment variables to be used in any case of failure in the above steps +if [ ! -e "/etc/config/kube.conf" ]; then + if [ -z "${CONTAINER_TYPE}" ]; then + cat defaultpromenvvariables | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables + elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + cat defaultpromenvvariables-rs | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables-rs + fi +fi + +#Sourcing telemetry environment variable file if it exists +if [ -e "telemetry_prom_config_env_var" ]; then + cat telemetry_prom_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source telemetry_prom_config_env_var +fi + +#Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. 
+/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb + +cat config_mdm_metrics_env_var | while read line; do + echo $line >> ~/.bashrc +done +source config_mdm_metrics_env_var + +#Parse the configmap to set the right environment variables for metric collection settings +/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb + +cat config_metric_collection_env_var | while read line; do + echo $line >> ~/.bashrc +done +source config_metric_collection_env_var + +#Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request +echo "Making wget request to cadvisor endpoint with port 10250" +#Defaults to use port 10255 +cAdvisorIsSecure=false +RET_CODE=`wget --server-response https://$NODE_IP:10250/stats/summary --no-check-certificate --header="Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" 2>&1 | awk '/^ HTTP/{print $2}'` +if [ $RET_CODE -eq 200 ]; then + cAdvisorIsSecure=true +fi + +# default to docker since this is default in AKS as of now and change to containerd once this becomes default in AKS +export CONTAINER_RUNTIME="docker" +export NODE_NAME="" + +if [ "$cAdvisorIsSecure" = true ]; then + echo "Wget request using port 10250 succeeded. Using 10250" + export IS_SECURE_CADVISOR_PORT=true + echo "export IS_SECURE_CADVISOR_PORT=true" >> ~/.bashrc + export CADVISOR_METRICS_URL="https://$NODE_IP:10250/metrics" + echo "export CADVISOR_METRICS_URL=https://$NODE_IP:10250/metrics" >> ~/.bashrc + echo "Making curl request to cadvisor endpoint /pods with port 10250 to get the configured container runtime on kubelet" + podWithValidContainerId=$(curl -s -k -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://$NODE_IP:10250/pods | jq -R 'fromjson? | [ .items[] | select( any(.status.phase; contains("Running")) ) ] | .[0]') +else + echo "Wget request using port 10250 failed. 
Using port 10255"
+  export IS_SECURE_CADVISOR_PORT=false
+  echo "export IS_SECURE_CADVISOR_PORT=false" >> ~/.bashrc
+  export CADVISOR_METRICS_URL="http://$NODE_IP:10255/metrics"
+  echo "export CADVISOR_METRICS_URL=http://$NODE_IP:10255/metrics" >> ~/.bashrc
+  echo "Making curl request to cadvisor endpoint with port 10255 to get the configured container runtime on kubelet"
+  podWithValidContainerId=$(curl -s http://$NODE_IP:10255/pods | jq -R 'fromjson? | [ .items[] | select( any(.status.phase; contains("Running")) ) ] | .[0]')
+fi
+
+if [ ! -z "$podWithValidContainerId" ]; then
+   containerRuntime=$(echo $podWithValidContainerId | jq -r '.status.containerStatuses[0].containerID' | cut -d ':' -f 1)
+   nodeName=$(echo $podWithValidContainerId | jq -r '.spec.nodeName')
+   # convert to lower case so that everywhere else can be used in lowercase
+   containerRuntime=$(echo $containerRuntime | tr "[:upper:]" "[:lower:]")
+   nodeName=$(echo $nodeName | tr "[:upper:]" "[:lower:]")
+   # update runtime only if its not empty, not null and not startswith docker
+   if [ -z "$containerRuntime" -o "$containerRuntime" == null ]; then
+      echo "using default container runtime as $CONTAINER_RUNTIME since got containerRuntime as empty or null"
+   elif [[ $containerRuntime != docker* ]]; then
+      export CONTAINER_RUNTIME=$containerRuntime
+   fi
+
+   if [ -z "$nodeName" -o "$nodeName" == null ]; then
+      echo "-e error nodeName in /pods API response is empty"
+   else
+      export NODE_NAME=$nodeName
+   fi
+else
+   echo "-e error either /pods API request failed or no running pods"
+fi
+
+echo "configured container runtime on kubelet is : "$CONTAINER_RUNTIME
+echo "export CONTAINER_RUNTIME="$CONTAINER_RUNTIME >> ~/.bashrc
+
+export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="kubelet_runtime_operations_total"
+echo "export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC >> ~/.bashrc
+export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="kubelet_runtime_operations_errors_total" 
+echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC >> ~/.bashrc + +# default to docker metrics +export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_docker_operations" +export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_docker_operations_errors" + +if [ "$CONTAINER_RUNTIME" != "docker" ]; then + # these metrics are avialble only on k8s versions <1.18 and will get deprecated from 1.18 + export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_runtime_operations" + export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_runtime_operations_errors" +else + #if container run time is docker then add omsagent user to local docker group to get access to docker.sock + # docker.sock only use for the telemetry to get the docker version + DOCKER_SOCKET=/var/run/host/docker.sock + DOCKER_GROUP=docker + REGULAR_USER=omsagent + if [ -S ${DOCKER_SOCKET} ]; then + echo "getting gid for docker.sock" + DOCKER_GID=$(stat -c '%g' ${DOCKER_SOCKET}) + echo "creating a local docker group" + groupadd -for -g ${DOCKER_GID} ${DOCKER_GROUP} + echo "adding omsagent user to local docker group" + usermod -aG ${DOCKER_GROUP} ${REGULAR_USER} + fi +fi + +echo "set caps for ruby process to read container env from proc" +sudo setcap cap_sys_ptrace,cap_dac_read_search+ep /opt/microsoft/omsagent/ruby/bin/ruby + +echo "export KUBELET_RUNTIME_OPERATIONS_METRIC="$KUBELET_RUNTIME_OPERATIONS_METRIC >> ~/.bashrc +echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC >> ~/.bashrc + +source ~/.bashrc + +echo $NODE_NAME > /var/opt/microsoft/docker-cimprov/state/containerhostname +#check if file was written successfully. +cat /var/opt/microsoft/docker-cimprov/state/containerhostname + + +#Commenting it for test. 
We do this in the installer now +#Setup sudo permission for containerlogtailfilereader +#chmod +w /etc/sudoers.d/omsagent +#echo "#run containerlogtailfilereader.rb for docker-provider" >> /etc/sudoers.d/omsagent +#echo "omsagent ALL=(ALL) NOPASSWD: /opt/microsoft/omsagent/ruby/bin/ruby /opt/microsoft/omsagent/plugin/containerlogtailfilereader.rb *" >> /etc/sudoers.d/omsagent +#chmod 440 /etc/sudoers.d/omsagent + +#Disable dsc +#/opt/microsoft/omsconfig/Scripts/OMS_MetaConfigHelper.py --disable +rm -f /etc/opt/microsoft/omsagent/conf/omsagent.d/omsconfig.consistencyinvoker.conf + +CIWORKSPACE_id="" +CIWORKSPACE_key="" + +if [ -z $INT ]; then + if [ -a /etc/omsagent-secret/PROXY ]; then + if [ -a /etc/omsagent-secret/DOMAIN ]; then + /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` -d `cat /etc/omsagent-secret/DOMAIN` -p `cat /etc/omsagent-secret/PROXY` + else + /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` -p `cat /etc/omsagent-secret/PROXY` + fi + CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" + CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" + elif [ -a /etc/omsagent-secret/DOMAIN ]; then + /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` -d `cat /etc/omsagent-secret/DOMAIN` + CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" + CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" + elif [ -a /etc/omsagent-secret/WSID ]; then + /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` + CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" + CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" + elif [ -a /run/secrets/DOMAIN ]; then + /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /run/secrets/WSID` -s `cat /run/secrets/KEY` -d `cat /run/secrets/DOMAIN` + CIWORKSPACE_id="$(cat /run/secrets/WSID)" + CIWORKSPACE_key="$(cat 
/run/secrets/KEY)" + elif [ -a /run/secrets/WSID ]; then + /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /run/secrets/WSID` -s `cat /run/secrets/KEY` + CIWORKSPACE_id="$(cat /run/secrets/WSID)" + CIWORKSPACE_key="$(cat /run/secrets/KEY)" + elif [ -z $DOMAIN ]; then + /opt/microsoft/omsagent/bin/omsadmin.sh -w $WSID -s $KEY + CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" + CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" + else + /opt/microsoft/omsagent/bin/omsadmin.sh -w $WSID -s $KEY -d $DOMAIN + CIWORKSPACE_id="$WSID" + CIWORKSPACE_key="$KEY" + fi +else +#To onboard to INT workspace - workspace-id (WSID-not base64 encoded), workspace-key (KEY-not base64 encoded), Domain(DOMAIN-int2.microsoftatlanta-int.com) +#need to be added to omsagent.yaml. + echo WORKSPACE_ID=$WSID > /etc/omsagent-onboard.conf + echo SHARED_KEY=$KEY >> /etc/omsagent-onboard.conf + echo URL_TLD=$DOMAIN >> /etc/omsagent-onboard.conf + /opt/microsoft/omsagent/bin/omsadmin.sh + CIWORKSPACE_id="$WSID" + CIWORKSPACE_key="$KEY" +fi + +#start cron daemon for logrotate +service cron start + +#check if agent onboarded successfully +/opt/microsoft/omsagent/bin/omsadmin.sh -l + +#get omsagent and docker-provider versions +dpkg -l | grep omsagent | awk '{print $2 " " $3}' +dpkg -l | grep docker-cimprov | awk '{print $2 " " $3}' + +DOCKER_CIMPROV_VERSION=$(dpkg -l | grep docker-cimprov | awk '{print $3}') +echo "DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" +export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION +echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc + +#region check to auto-activate oneagent, to route container logs, +#Intent is to activate one agent routing for all managed clusters with region in the regionllist, unless overridden by configmap +# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map +# AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE will have the final route that we compute & set, based on our region list logic +echo 
"************start oneagent log routing checks************" +# by default, use configmap route for safer side +AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE + +#trim region list +oneagentregions="$(echo $AZMON_CONTAINERLOGS_ONEAGENT_REGIONS | xargs)" +#lowercase region list +typeset -l oneagentregions=$oneagentregions +echo "oneagent regions: $oneagentregions" +#trim current region +currentregion="$(echo $AKS_REGION | xargs)" +#lowercase current region +typeset -l currentregion=$currentregion +echo "current region: $currentregion" + +#initilze isoneagentregion as false +isoneagentregion=false + +#set isoneagentregion as true if matching region is found +if [ ! -z $oneagentregions ] && [ ! -z $currentregion ]; then + for rgn in $(echo $oneagentregions | sed "s/,/ /g"); do + if [ "$rgn" == "$currentregion" ]; then + isoneagentregion=true + echo "current region is in oneagent regions..." + break + fi + done +else + echo "current region is not in oneagent regions..." +fi + +if [ "$isoneagentregion" = true ]; then + #if configmap has a routing for logs, but current region is in the oneagent region list, take the configmap route + if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then + AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE + echo "oneagent region is true for current region:$currentregion and config map logs route is not empty. so using config map logs route as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" + else #there is no configmap route, so route thru oneagent + AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE="v2" + echo "oneagent region is true for current region:$currentregion and config map logs route is empty. so using oneagent as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" + fi +else + echo "oneagent region is false for current region:$currentregion" +fi + + +#start oneagent +if [ ! -e "/etc/config/kube.conf" ]; then + if [ ! 
-z $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE ]; then + echo "container logs configmap route is $AZMON_CONTAINER_LOGS_ROUTE" + echo "container logs effective route is $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" + #trim + containerlogsroute="$(echo $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE | xargs)" + # convert to lowercase + typeset -l containerlogsroute=$containerlogsroute + + echo "setting AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE as :$containerlogsroute" + export AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$containerlogsroute + echo "export AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$containerlogsroute" >> ~/.bashrc + source ~/.bashrc + + if [ "$containerlogsroute" == "v2" ]; then + echo "activating oneagent..." + echo "configuring mdsd..." + cat /etc/mdsd.d/envmdsd | while read line; do + echo $line >> ~/.bashrc + done + source /etc/mdsd.d/envmdsd + + echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" + export CIWORKSPACE_id=$CIWORKSPACE_id + echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc + export CIWORKSPACE_key=$CIWORKSPACE_key + echo "export CIWORKSPACE_key=$CIWORKSPACE_key" >> ~/.bashrc + + source ~/.bashrc + + dpkg -l | grep mdsd | awk '{print $2 " " $3}' + + echo "starting mdsd ..." + mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + + touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 + fi + fi +fi +echo "************end oneagent log routing checks************" + +#telegraf & fluentbit requirements +if [ ! 
-e "/etc/config/kube.conf" ]; then + if [ -z "${CONTAINER_TYPE}" ]; then + if [ "$CONTAINER_RUNTIME" == "docker" ]; then + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + else + echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" + sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + fi + elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + fi +# else +# /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & +# telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" +fi + +#set env vars used by telegraf +if [ -z $AKS_RESOURCE_ID ]; then + telemetry_aks_resource_id="" + telemetry_aks_region="" + telemetry_cluster_name="" + telemetry_acs_resource_name=$ACS_RESOURCE_NAME + telemetry_cluster_type="ACS" +else + telemetry_aks_resource_id=$AKS_RESOURCE_ID + telemetry_aks_region=$AKS_REGION + telemetry_cluster_name=$AKS_RESOURCE_ID + telemetry_acs_resource_name="" + telemetry_cluster_type="AKS" +fi + +export TELEMETRY_AKS_RESOURCE_ID=$telemetry_aks_resource_id +echo "export TELEMETRY_AKS_RESOURCE_ID=$telemetry_aks_resource_id" >> ~/.bashrc +export TELEMETRY_AKS_REGION=$telemetry_aks_region +echo "export TELEMETRY_AKS_REGION=$telemetry_aks_region" >> ~/.bashrc +export TELEMETRY_CLUSTER_NAME=$telemetry_cluster_name +echo "export 
TELEMETRY_CLUSTER_NAME=$telemetry_cluster_name" >> ~/.bashrc +export TELEMETRY_ACS_RESOURCE_NAME=$telemetry_acs_resource_name +echo "export TELEMETRY_ACS_RESOURCE_NAME=$telemetry_acs_resource_name" >> ~/.bashrc +export TELEMETRY_CLUSTER_TYPE=$telemetry_cluster_type +echo "export TELEMETRY_CLUSTER_TYPE=$telemetry_cluster_type" >> ~/.bashrc + +#if [ ! -e "/etc/config/kube.conf" ]; then +# nodename=$(cat /hostfs/etc/hostname) +#else +nodename=$(cat /var/opt/microsoft/docker-cimprov/state/containerhostname) +#fi +echo "nodename: $nodename" +echo "replacing nodename in telegraf config" +sed -i -e "s/placeholder_hostname/$nodename/g" $telegrafConfFile + +export HOST_MOUNT_PREFIX=/hostfs +echo "export HOST_MOUNT_PREFIX=/hostfs" >> ~/.bashrc +export HOST_PROC=/hostfs/proc +echo "export HOST_PROC=/hostfs/proc" >> ~/.bashrc +export HOST_SYS=/hostfs/sys +echo "export HOST_SYS=/hostfs/sys" >> ~/.bashrc +export HOST_ETC=/hostfs/etc +echo "export HOST_ETC=/hostfs/etc" >> ~/.bashrc +export HOST_VAR=/hostfs/var +echo "export HOST_VAR=/hostfs/var" >> ~/.bashrc + +aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) +export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey +echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc + +source ~/.bashrc + +#start telegraf +/opt/telegraf --config $telegrafConfFile & +/opt/telegraf --version +dpkg -l | grep td-agent-bit | awk '{print $2 " " $3}' + +#dpkg -l | grep telegraf | awk '{print $2 " " $3}' + + + +echo "stopping rsyslog..." +service rsyslog stop + +echo "getting rsyslog status..." 
+service rsyslog status + +shutdown() { + /opt/microsoft/omsagent/bin/service_control stop + } + +trap "shutdown" SIGTERM + +sleep inf & wait diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 0b5ad5d39..b4580f45f 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -1,7 +1,6 @@ #!/bin/bash if [ -e "/etc/config/kube.conf" ]; then - echo "rashmi-in-rs-omsagent-conf" cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then echo "rashmi-in-ds-prom-omsagent-conf" @@ -226,17 +225,15 @@ fi #Setting default environment variables to be used in any case of failure in the above steps if [ ! -e "/etc/config/kube.conf" ]; then - if [ -z "${CONTAINER_TYPE}" ]; then - cat defaultpromenvvariables | while read line; do - echo $line >> ~/.bashrc - done - source defaultpromenvvariables - elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - cat defaultpromenvvariables-rs | while read line; do - echo $line >> ~/.bashrc - done - source defaultpromenvvariables-rs - fi + cat defaultpromenvvariables | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables +else + cat defaultpromenvvariables-rs | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables-rs fi #Sourcing telemetry environment variable file if it exists @@ -528,23 +525,18 @@ echo "************end oneagent log routing checks************" #telegraf & fluentbit requirements if [ ! 
-e "/etc/config/kube.conf" ]; then - if [ -z "${CONTAINER_TYPE}" ]; then - if [ "$CONTAINER_RUNTIME" == "docker" ]; then - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" - else - echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" - sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" - fi - elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + if [ "$CONTAINER_RUNTIME" == "docker" ]; then + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + else + echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" + sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" fi -# else -# /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & -# telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" +else + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e 
/opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" fi #set env vars used by telegraf diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index e06394996..aa844262f 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -505,14 +505,14 @@ spec: - mountPath: /etc/config/settings/adx name: omsagent-adx-secret readOnly: true - livenessProbe: - exec: - command: - - /bin/bash - - -c - - /opt/livenessprobe.sh - initialDelaySeconds: 60 - periodSeconds: 60 + #livenessProbe: + #exec: + #command: + #- /bin/bash + #- -c + #- /opt/livenessprobe.sh + #initialDelaySeconds: 60 + #periodSeconds: 60 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: From 7418474f9da12560ee6a19f9145651d39c17e587 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 14 Jan 2021 14:27:54 -0800 Subject: [PATCH 010/175] undoing npm --- .../scripts/tomlparser-npm-config.rb | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-npm-config.rb b/build/linux/installer/scripts/tomlparser-npm-config.rb index aa4a6430d..777fef209 100644 --- a/build/linux/installer/scripts/tomlparser-npm-config.rb +++ b/build/linux/installer/scripts/tomlparser-npm-config.rb @@ -16,12 +16,11 @@ @collect_advanced_npm_metrics = false @npm_default_setting = "[]" @npm_node_urls = "[\"http://$NODE_IP:10091/node-metrics\"]" -@npm_cluster_urls = "[\"http://npm-metrics-cluster-service.kube-system:9000/cluster-metrics\"]" +@npm_cluster_urls="[\"http://npm-metrics-cluster-service.kube-system:9000/cluster-metrics\"]" @npm_basic_drop_metrics_cluster = "[\"npm_ipset_counts\"]" @tgfConfigFileDS = "/etc/opt/microsoft/docker-cimprov/telegraf.conf" @tgfConfigFileRS = "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" @replicaset = "replicaset" -@promSideCar = "prometheus-sidecar" # Use parser to parse the configmap toml file to a ruby structure def 
parseConfigMap @@ -46,14 +45,14 @@ def parseConfigMap def populateSettingValuesFromConfigMap(parsedConfig) begin if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].nil? - advanced_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].to_s - puts "config::npm::got:integrations.azure_network_policy_manager.collect_advanced_metrics='#{advanced_npm_metrics}'" - if !advanced_npm_metrics.nil? && advanced_npm_metrics.strip.casecmp("true") == 0 - @collect_advanced_npm_metrics = true - else - @collect_advanced_npm_metrics = false - end - puts "config::npm::set:integrations.azure_network_policy_manager.collect_advanced_metrics=#{@collect_advanced_npm_metrics}" + advanced_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].to_s + puts "config::npm::got:integrations.azure_network_policy_manager.collect_advanced_metrics='#{advanced_npm_metrics}'" + if !advanced_npm_metrics.nil? && advanced_npm_metrics.strip.casecmp("true") == 0 + @collect_advanced_npm_metrics = true + else + @collect_advanced_npm_metrics = false + end + puts "config::npm::set:integrations.azure_network_policy_manager.collect_advanced_metrics=#{@collect_advanced_npm_metrics}" end rescue => errorStr puts "config::npm::error:Exception while reading config settings for npm advanced setting - #{errorStr}, using defaults" @@ -61,14 +60,14 @@ def populateSettingValuesFromConfigMap(parsedConfig) end begin if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].nil? 
- basic_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].to_s - puts "config::npm::got:integrations.azure_network_policy_manager.collect_basic_metrics='#{basic_npm_metrics}'" - if !basic_npm_metrics.nil? && basic_npm_metrics.strip.casecmp("true") == 0 - @collect_basic_npm_metrics = true - else - @collect_basic_npm_metrics = false - end - puts "config::npm::set:integrations.azure_network_policy_manager.collect_basic_metrics=#{@collect_basic_npm_metrics}" + basic_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].to_s + puts "config::npm::got:integrations.azure_network_policy_manager.collect_basic_metrics='#{basic_npm_metrics}'" + if !basic_npm_metrics.nil? && basic_npm_metrics.strip.casecmp("true") == 0 + @collect_basic_npm_metrics = true + else + @collect_basic_npm_metrics = false + end + puts "config::npm::set:integrations.azure_network_policy_manager.collect_basic_metrics=#{@collect_basic_npm_metrics}" end rescue => errorStr puts "config::npm::error:Exception while reading config settings for npm basic setting - #{errorStr}, using defaults" @@ -91,11 +90,12 @@ def populateSettingValuesFromConfigMap(parsedConfig) @collect_advanced_npm_metrics = false end + + controller = ENV["CONTROLLER_TYPE"] -container_type = ENV["CONTAINER_TYPE"] tgfConfigFile = @tgfConfigFileDS -if ((controller.casecmp(@replicaset) == 0) || (container_type.casecmp(@promSideCar) == 0)) +if controller.casecmp(@replicaset) == 0 tgfConfigFile = @tgfConfigFileRS end @@ -123,7 +123,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) telemetryFile = File.open("integration_npm_config_env_var", "w") if !telemetryFile.nil? 
- if @collect_advanced_npm_metrics == true + if @collect_advanced_npm_metrics == true telemetryFile.write("export TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED=1\n") elsif @collect_basic_npm_metrics == true telemetryFile.write("export TELEMETRY_NPM_INTEGRATION_METRICS_BASIC=1\n") From cd488585157bff0b0fb00627c1413220db669fca Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 14 Jan 2021 16:09:56 -0800 Subject: [PATCH 011/175] changes to container.conf --- build/linux/installer/conf/prometheus-side-car.conf | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/build/linux/installer/conf/prometheus-side-car.conf b/build/linux/installer/conf/prometheus-side-car.conf index 7073c43d7..31bc5f5ab 100644 --- a/build/linux/installer/conf/prometheus-side-car.conf +++ b/build/linux/installer/conf/prometheus-side-car.conf @@ -1,11 +1,11 @@ # Fluentd config file for OMS Docker - container components (non kubeAPI) # Forward port 25225 for container logs - - type forward - port 25225 - bind 127.0.0.1 - +# +# type forward +# port 25225 +# bind 127.0.0.1 +# From e3921a74923aedcad08f9faac5a7c9ced62bf25c Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 14 Jan 2021 16:58:05 -0800 Subject: [PATCH 012/175] updating telegraf and fluent bit conf --- .../conf/td-agent-bit-prom-side-car.conf | 28 + .../conf/telegraf-prom-side-car.conf | 796 ++++++++++++++++++ kubernetes/linux/main.sh | 3 + 3 files changed, 827 insertions(+) create mode 100644 build/linux/installer/conf/td-agent-bit-prom-side-car.conf create mode 100644 build/linux/installer/conf/telegraf-prom-side-car.conf diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf new file mode 100644 index 000000000..575981b70 --- /dev/null +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -0,0 +1,28 @@ +[SERVICE] + #Default service flush interval is 15 seconds + ${SERVICE_FLUSH_INTERVAL} + 
HTTP_Server Off + Daemon Off + storage.path /var/opt/microsoft/docker-cimprov/state/flbstore/ + storage.sync normal + storage.checksum off + storage.backlog.mem_limit 10M + Log_Level info + Parsers_File /etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf + Log_File /var/opt/microsoft/docker-cimprov/log/fluent-bit.log + +[INPUT] + Name tcp + Tag oms.container.perf.telegraf.* + Listen 0.0.0.0 + Port 25229 + Chunk_Size 32 + Buffer_Size 64 + Mem_Buf_Limit 5m + +[OUTPUT] + Name oms + EnableTelemetry true + Retry_Limit 10 + TelemetryPushIntervalSeconds 300 + Match oms.container.* \ No newline at end of file diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf new file mode 100644 index 000000000..85ec8a89c --- /dev/null +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -0,0 +1,796 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + + +# Global tags can be specified here in key="value" format. 
+[global_tags] + #Below are entirely used for telemetry + #AgentVersion = "$AGENT_VERSION" + #AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" + #ACS_RESOURCE_NAME = "$TELEMETRY_ACS_RESOURCE_NAME" + #Region = "$TELEMETRY_AKS_REGION" + #ClusterName = "$TELEMETRY_CLUSTER_NAME" + #ClusterType = "$TELEMETRY_CLUSTER_TYPE" + #Computer = "placeholder_hostname" + #ControllerType = "$CONTROLLER_TYPE" + + hostName = "placeholder_hostname" + + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "60s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "15s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. 
+ ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. + debug = false + ## Run telegraf in quiet mode (error log messages only). + quiet = true + ## Specify the log file name. The empty string means to log to stderr. + logfile = "" + ## Override default hostname, if empty use os.Hostname() + #hostname = "placeholder_hostname" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = true + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +# Generic socket writer capable of handling multiple socket types. +[[outputs.socket_writer]] + ## URL to connect to + address = "tcp://0.0.0.0:25229" + # address = "tcp://example.com:http" + # address = "tcp4://127.0.0.1:8094" + # address = "tcp6://127.0.0.1:8094" + # address = "tcp6://[2001:db8::1]:8094" + # address = "udp://127.0.0.1:8094" + # address = "udp4://127.0.0.1:8094" + # address = "udp6://127.0.0.1:8094" + # address = "unix:///tmp/telegraf.sock" + # address = "unixgram:///tmp/telegraf.sock" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. 
+ ## Only applies to TCP sockets. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" + + ## Data format to generate. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "json" + namedrop = ["agent_telemetry"] + #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] + +# # Output to send MDM metrics to fluent bit and then route it to fluentD +# [[outputs.socket_writer]] +# ## URL to connect to +# address = "tcp://0.0.0.0:25228" +# # address = "tcp://example.com:http" +# # address = "tcp4://127.0.0.1:8094" +# # address = "tcp6://127.0.0.1:8094" +# # address = "tcp6://[2001:db8::1]:8094" +# # address = "udp://127.0.0.1:8094" +# # address = "udp4://127.0.0.1:8094" +# # address = "udp6://127.0.0.1:8094" +# # address = "unix:///tmp/telegraf.sock" +# # address = "unixgram:///tmp/telegraf.sock" + +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # insecure_skip_verify = false + +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" + +# ## Data format to generate. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "json" +# namepass = ["container.azm.ms/disk"] +# #fieldpass = ["used_percent"] + +[[outputs.application_insights]] + ## Instrumentation key of the Application Insights resource. 
+ instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY" + + ## Timeout for closing (default: 5s). + # timeout = "5s" + + ## Enable additional diagnostic logging. + # enable_diagnostic_logging = false + + ## Context Tag Sources add Application Insights context tags to a tag value. + ## + ## For list of allowed context tag keys see: + ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go + # [outputs.application_insights.context_tag_sources] + # "ai.cloud.role" = "kubernetes_container_name" + # "ai.cloud.roleInstance" = "kubernetes_pod_name" + namepass = ["agent_telemetry"] + #tagdrop = ["nodeName"] + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + +[[processors.converter]] + [processors.converter.fields] + float = ["*"] +# # Perform string processing on tags, fields, and measurements +#[[processors.rename]] + #[[processors.rename.replace]] + # measurement = "disk" + # dest = "nodes" +# [[processors.rename.replace]] +# field = "free" +# dest = "freeBytes" +# [[processors.rename.replace]] +# field = "used" +# dest = "usedBytes" +# [[processors.rename.replace]] +# field = "used_percent" +# dest = "usedPercentage" + #[[processors.rename.replace]] + # measurement = "net" + # dest = "nodes" + #[[processors.rename.replace]] + # field = "bytes_recv" + # dest = "networkBytesReceivedTotal" + #[[processors.rename.replace]] + # field = "bytes_sent" + # dest = "networkBytesSentTotal" + #[[processors.rename.replace]] + # field = "err_in" + # dest = "networkErrorsInTotal" + #[[processors.rename.replace]] + # field = "err_out" + # dest = "networkErrorsOutTotal" + #[[processors.rename.replace]] + # measurement = "kubernetes_pod_volume" + # dest = "pods" + #[[processors.rename.replace]] + # field = "used_bytes" + # dest = "podVolumeUsedBytes" + #[[processors.rename.replace]] + # field = 
"available_bytes" + # dest = "podVolumeAvailableBytes" + #[[processors.rename.replace]] + # measurement = "kubernetes_pod_network" + # dest = "pods" + #[[processors.rename.replace]] + # field = "tx_errors" + # dest = "podNetworkTxErrorsTotal" + #[[processors.rename.replace]] + # field = "rx_errors" + # dest = "podNetworkRxErrorsTotal" + #[[processors.rename.replace]] + # tag = "volume_name" + # dest = "volumeName" + #[[processors.rename.replace]] + # tag = "pod_name" + # dest = "podName" + #[[processors.rename.replace]] + # measurement = "docker" + # dest = "containers" + #[[processors.rename.replace]] + # measurement = "docker_container_status" + # dest = "containers" + #[[processors.rename.replace]] + # field = "n_containers" + # dest = "numContainers" + #[[processors.rename.replace]] + # field = "n_containers_running" + # dest = "numContainersRunning" + #[[processors.rename.replace]] + # field = "n_containers_stopped" + # dest = "numContainersStopped" + #[[processors.rename.replace]] + # field = "n_containers_paused" + # dest = "numContainersPaused" + #[[processors.rename.replace]] + # field = "n_images" + # dest = "numContainerImages" + +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# +# ## Convert a field value to lowercase and store in a new field +# # [[processors.strings.lowercase]] +# # field = "uri_stem" +# # dest = "uri_stem_normalised" +# +# ## Trim leading and trailing whitespace using the default cutset +# # [[processors.strings.trim]] +# # field = "message" +# +# ## Trim leading characters in cutset +# # [[processors.strings.trim_left]] +# # field = "message" +# # cutset = "\t" +# +# ## Trim trailing characters in cutset +# # [[processors.strings.trim_right]] +# # field = "message" +# # cutset = "\r\n" +# +# ## Trim the given prefix from the field +# # [[processors.strings.trim_prefix]] +# # field = "my_value" +# # prefix = "my_" +# +# ## Trim the given suffix from the field +# # 
[[processors.strings.trim_suffix]] +# # field = "read_count" +# # suffix = "_count" + + +# # Print all metrics that pass through this filter. +# [[processors.topk]] +# ## How many seconds between aggregations +# # period = 10 +# +# ## How many top metrics to return +# # k = 10 +# +# ## Over which tags should the aggregation be done. Globs can be specified, in +# ## which case any tag matching the glob will aggregated over. If set to an +# ## empty list is no aggregation over tags is done +# # group_by = ['*'] +# +# ## Over which fields are the top k are calculated +# # fields = ["value"] +# +# ## What aggregation to use. Options: sum, mean, min, max +# # aggregation = "mean" +# +# ## Instead of the top k largest metrics, return the bottom k lowest metrics +# # bottomk = false +# +# ## The plugin assigns each metric a GroupBy tag generated from its name and +# ## tags. If this setting is different than "" the plugin will add a +# ## tag (which name will be the value of this setting) to each metric with +# ## the value of the calculated GroupBy tag. Useful for debugging +# # add_groupby_tag = "" +# +# ## These settings provide a way to know the position of each metric in +# ## the top k. The 'add_rank_field' setting allows to specify for which +# ## fields the position is required. If the list is non empty, then a field +# ## will be added to each and every metric for each string present in this +# ## setting. This field will contain the ranking of the group that +# ## the metric belonged to when aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_rank' +# # add_rank_fields = [] +# +# ## These settings provide a way to know what values the plugin is generating +# ## when aggregating metrics. The 'add_agregate_field' setting allows to +# ## specify for which fields the final aggregation value is required. 
If the +# ## list is non empty, then a field will be added to each every metric for +# ## each field present in this setting. This field will contain +# ## the computed aggregation for the group that the metric belonged to when +# ## aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_aggregate' +# # add_aggregate_fields = [] + + + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + +# # Keep the aggregate basicstats of each metric passing through. +# [[aggregators.basicstats]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false + + +# # Create aggregate histograms. +# [[aggregators.histogram]] +# ## The period in which to flush the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Example config that aggregates all fields of the metric. +# # [[aggregators.histogram.config]] +# # ## The set of buckets. +# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] +# # ## The name of metric. +# # measurement_name = "cpu" +# +# ## Example config that aggregates only specific fields of the metric. +# # [[aggregators.histogram.config]] +# # ## The set of buckets. +# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] +# # ## The name of metric. +# # measurement_name = "diskio" +# # ## The concrete fields of metric +# # fields = ["io_time", "read_time", "write_time"] + + +# # Keep the aggregate min/max of each metric passing through. 
+# [[aggregators.minmax]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false + + +# # Count the occurance of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## The fields for which the values will be counted +# fields = [] + + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + +# Read metrics about cpu usage +#[[inputs.cpu]] + ## Whether to report per-cpu stats or not +# percpu = false + ## Whether to report total system cpu stats or not +# totalcpu = true + ## If true, collect raw CPU time metrics. +# collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states. +# report_active = true +# fieldpass = ["usage_active","cluster","node","host","device"] +# taginclude = ["cluster","cpu","node"] + + + +# Read metrics about disk usage by mount point +# [[inputs.disk]] +# name_prefix="container.azm.ms/" +# ## By default stats will be gathered for all mount points. +# ## Set mount_points will restrict the stats to only the specified mount points. +# # mount_points = ["/"] + +# ## Ignore mount points by filesystem type. +# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] +# fieldpass = ["free", "used", "used_percent"] +# taginclude = ["device","path","hostName"] +# # Below due to Bug - https://github.com/influxdata/telegraf/issues/5615 +# # ORDER matters here!! 
- i.e the below should be the LAST modifier +# [inputs.disk.tagdrop] +# path = ["/var/lib/kubelet*", "/dev/termination-log", "/var/log", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/etc/kubernetes/host", "/var/lib/docker/containers", "/etc/config/settings"] + + +# # Read metrics about memory usage +# #[[inputs.mem]] +# # fieldpass = ["used_percent", "cluster", "node","host","device"] +# # taginclude = ["cluster","node"] + +# # Read metrics about disk IO by device +# [[inputs.diskio]] +# name_prefix="container.azm.ms/" +# ## By default, telegraf will gather stats for all devices including +# ## disk partitions. +# ## Setting devices will restrict the stats to the specified devices. +# devices = ["sd[a-z][0-9]"] +# ## Uncomment the following line if you need disk serial numbers. +# # skip_serial_number = false +# # +# ## On systems which support it, device metadata can be added in the form of +# ## tags. +# ## Currently only Linux is supported via udev properties. You can view +# ## available properties for a device by running: +# ## 'udevadm info -q property -n /dev/sda' +# ## Note: Most, but not all, udev properties can be accessed this way. Properties +# ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. +# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] +# # +# ## Using the same metadata source as device_tags, you can also customize the +# ## name of the device via templates. +# ## The 'name_templates' parameter is a list of templates to try and apply to +# ## the device. The template may contain variables in the form of '$PROPERTY' or +# ## '${PROPERTY}'. The first template which does not contain any variables not +# ## present for the device is used as the device name tag. +# ## The typical use case is for LVM volumes, to get the VG/LV name instead of +# ## the near-meaningless DM-0 name. 
+# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] +# fieldpass = ["reads", "read_bytes", "read_time", "writes", "write_bytes", "write_time", "io_time", "iops_in_progress"] +# taginclude = ["name","hostName"] + +# # Read metrics about network interface usage +# [[inputs.net]] +# name_prefix="container.azm.ms/" +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. +# ## +# # interfaces = ["eth0"] +# ## +# ## On linux systems telegraf also collects protocol stats. +# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. +# ## +# ignore_protocol_stats = true +# ## +# fieldpass = ["bytes_recv", "bytes_sent", "err_in", "err_out"] +# taginclude = ["interface","hostName"] + +# Read metrics from the kubernetes kubelet api +#[[inputs.kubernetes]] + ## URL for the kubelet + #url = "http://1.1.1.1:10255" +# url = "http://placeholder_nodeip:10255" + + ## Use bearer token for authorization + # bearer_token = /path/to/bearer/token + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +# fieldpass = ["used_bytes", "available_bytes", "tx_errors", "rx_errors" ] +# taginclude = ["volume_name","nodeName","namespace","pod_name"] +# Read metrics about docker containers +#[[inputs.docker]] + ## Docker Endpoint + ## To use TCP, set endpoint = "tcp://[ip]:[port]" + ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/host/docker.sock" + + ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# gather_services = false + + ## Only collect metrics for these containers, collect all if empty +# container_names = [] + + ## 
Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers +# container_name_include = [] +# container_name_exclude = [] + + ## Container states to include and exclude. Globs accepted. + ## When empty only containers in the "running" state will be captured. +# container_state_include = ['*'] + # container_state_exclude = [] + + ## Timeout for docker list, info, and stats commands +# timeout = "5s" + + ## Whether to report for each container per-device blkio (8:0, 8:1...) and + ## network (eth0, eth1, ...) stats or not +# perdevice = true + ## Whether to report for each container total blkio and network stats or not +# total = true + ## Which environment variables should we use as a tag + ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] + + ## docker labels to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all labels as tags +# docker_label_include = [] +# docker_label_exclude = [] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +# fieldpass = ["n_containers", "n_containers_running", "n_containers_stopped", "n_containers_paused", "n_images"] + #fieldpass = ["numContainers", "numContainersRunning", "numContainersStopped", "numContainersPaused", "numContainerImages"] +# taginclude = ["nodeName"] + +#[[inputs.procstat]] +# #name_prefix="t.azm.ms/" +# exe = "mdsd" +# interval = "10s" +# pid_finder = "native" +# pid_tag = true +# name_override = "agent_telemetry" +# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] +# [inputs.procstat.tags] +# Computer = "$NODE_NAME" +# AgentVersion = "$AGENT_VERSION" +# ControllerType = "$CONTROLLER_TYPE" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = 
"$TELEMETRY_AKS_REGION" +# [[inputs.procstat]] +# #name_prefix="container.azm.ms/" +# exe = "ruby" +# interval = "10s" +# pid_finder = "native" +# pid_tag = true +# name_override = "agent_telemetry" +# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] +# [inputs.procstat.tags] +# Computer = "$NODE_NAME" +# AgentVersion = "$AGENT_VERSION" +# ControllerType = "$CONTROLLER_TYPE" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" +# [[inputs.procstat]] +# #name_prefix="container.azm.ms/" +# exe = "td-agent-bit" +# interval = "10s" +# pid_finder = "native" +# pid_tag = true +# name_override = "agent_telemetry" +# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] +# [inputs.procstat.tags] +# Computer = "$NODE_NAME" +# AgentVersion = "$AGENT_VERSION" +# ControllerType = "$CONTROLLER_TYPE" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" +# [[inputs.procstat]] +# #name_prefix="container.azm.ms/" +# exe = "telegraf" +# interval = "10s" +# pid_finder = "native" +# pid_tag = true +# name_override = "agent_telemetry" +# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] +# [inputs.procstat.tags] +# Computer = "$NODE_NAME" +# AgentVersion = "$AGENT_VERSION" +# ControllerType = "$CONTROLLER_TYPE" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" + +#kubelet-1 +# [[inputs.prometheus]] +# name_prefix="container.azm.ms/" +# ## An array of urls to scrape metrics from. 
+# urls = ["$CADVISOR_METRICS_URL"] +# fieldpass = ["$KUBELET_RUNTIME_OPERATIONS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC"] + +# metric_version = 2 +# url_tag = "scrapeUrl" + +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" + +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to `https` & most likely set the tls config. +# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# # monitor_kubernetes_pods = true + +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" +# ## OR +# # bearer_token_string = "abc_123" + +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# response_timeout = "15s" + +# ## Optional TLS Config +# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +# #tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = true +# #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] +# [inputs.prometheus.tagpass] +# operation_type = ["create_container", "remove_container", "pull_image"] + +# #kubelet-2 +# [[inputs.prometheus]] +# name_prefix="container.azm.ms/" +# ## An array of urls to scrape metrics from. 
+# urls = ["$CADVISOR_METRICS_URL"] + +# fieldpass = ["kubelet_running_pod_count","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] + +# metric_version = 2 +# url_tag = "scrapeUrl" + + +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# response_timeout = "15s" + +# ## Optional TLS Config +# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +# insecure_skip_verify = true + + +## prometheus custom metrics +# [[inputs.prometheus]] + +# interval = "$AZMON_DS_PROM_INTERVAL" + +# ## An array of urls to scrape metrics from. +# urls = $AZMON_DS_PROM_URLS + +# fieldpass = $AZMON_DS_PROM_FIELDPASS + +# fielddrop = $AZMON_DS_PROM_FIELDDROP + +# metric_version = 2 +# url_tag = "scrapeUrl" + +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" + +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" +# ## OR +# # bearer_token_string = "abc_123" + +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# response_timeout = "15s" + +# ## Optional TLS Config +# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +# #tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = true + #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] + +##npm +# [[inputs.prometheus]] +# #name_prefix="container.azm.ms/" +# ## An array of urls to scrape metrics from. 
+# urls = $AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE + +# metric_version = 2 +# url_tag = "scrapeUrl" + +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" + +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to `https` & most likely set the tls config. +# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# # monitor_kubernetes_pods = true + +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" +# ## OR +# # bearer_token_string = "abc_123" + +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# response_timeout = "15s" + +# ## Optional TLS Config +# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +# #tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = true +# #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] +# #[inputs.prometheus.tagpass] +# # operation_type = ["create_container", "remove_container", "pull_image"] + +# [[inputs.exec]] +# ## Commands array +# interval = "15m" +# commands = [ +# "/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh" +# ] + +# ## Timeout for each command to complete. +# timeout = "15s" + +# ## measurement name suffix (for separating different commands) +# name_suffix = "_telemetry" + +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# tagexclude = ["hostName"] +# [inputs.exec.tags] +# AgentVersion = "$AGENT_VERSION" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACS_RESOURCE_NAME = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" +# ClusterName = "$TELEMETRY_CLUSTER_NAME" +# ClusterType = "$TELEMETRY_CLUSTER_TYPE" +# Computer = "placeholder_hostname" +# ControllerType = "$CONTROLLER_TYPE" \ No newline at end of file diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index b4580f45f..3fd5624d4 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -534,6 +534,9 @@ if [ ! -e "/etc/config/kube.conf" ]; then /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" fi +elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" else /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" From 7ffbbe8d340239d8833af2a641b47ebec57f83b9 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 14 Jan 2021 18:45:51 -0800 Subject: [PATCH 013/175] copying files during build --- .../installer/datafiles/base_container.data | 28 ++++++++++--------- kubernetes/linux/main.sh | 2 ++ 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/build/linux/installer/datafiles/base_container.data 
b/build/linux/installer/datafiles/base_container.data index dc062f48a..e89b8537d 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -110,19 +110,21 @@ MAINTAINER: 'Microsoft Corporation' /opt/tomlrb/string_utils.rb; source/toml-parser/tomlrb/string_utils.rb; 644; root; root /opt/tomlrb/version.rb; source/toml-parser/tomlrb/version.rb; 644; root; root -/opt/td-agent-bit/bin/out_oms.so; intermediate/${{BUILD_CONFIGURATION}}/out_oms.so; 755; root; root -/etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf; build/linux/installer/conf/prometheus-side-car.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; build/linux/installer/conf/td-agent-bit.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf; build/linux/installer/conf/td-agent-bit-rs.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf; build/linux/installer/conf/azm-containers-parser.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/out_oms.conf; build/linux/installer/conf/out_oms.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/telegraf.conf; build/linux/installer/conf/telegraf.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf; build/linux/installer/conf/telegraf-rs.conf; 644; root; root -/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; build/linux/installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root -/opt/livenessprobe.sh; build/linux/installer/scripts/livenessprobe.sh; 755; root; root -/opt/tomlparser-prom-customconfig.rb; build/linux/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root -/opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root -/opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root +/opt/td-agent-bit/bin/out_oms.so; 
intermediate/${{BUILD_CONFIGURATION}}/out_oms.so; 755; root; root +/etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf; build/linux/installer/conf/prometheus-side-car.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; build/linux/installer/conf/td-agent-bit.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf; build/linux/installer/conf/td-agent-bit-prom-side-car.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf; build/linux/installer/conf/td-agent-bit-rs.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf; build/linux/installer/conf/azm-containers-parser.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/out_oms.conf; build/linux/installer/conf/out_oms.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/telegraf.conf; build/linux/installer/conf/telegraf.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf; build/linux/installer/conf/telegraf-prom-side-car.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf; build/linux/installer/conf/telegraf-rs.conf; 644; root; root +/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; build/linux/installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root +/opt/livenessprobe.sh; build/linux/installer/scripts/livenessprobe.sh; 755; root; root +/opt/tomlparser-prom-customconfig.rb; build/linux/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root +/opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root +/opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root /opt/tomlparser-agent-config.rb; build/linux/installer/scripts/tomlparser-agent-config.rb; 755; root; root /opt/tomlparser.rb; build/common/installer/scripts/tomlparser.rb; 755; root; root diff --git 
a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 3fd5624d4..74c2269fe 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -535,9 +535,11 @@ if [ ! -e "/etc/config/kube.conf" ]; then telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" fi elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + echo "in side car................" /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" else + echo "in ds..............." /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" fi From 791b0d3533dfe6555fe6fd8990ddde296f694cbd Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 14 Jan 2021 19:02:30 -0800 Subject: [PATCH 014/175] changes --- kubernetes/linux/main.sh | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 74c2269fe..834446e77 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -525,21 +525,23 @@ echo "************end oneagent log routing checks************" #telegraf & fluentbit requirements if [ ! -e "/etc/config/kube.conf" ]; then - if [ "$CONTAINER_RUNTIME" == "docker" ]; then - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + echo "in side car................" 
+ /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" else - echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" - sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" - fi -elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - echo "in side car................" - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + echo "in ds................" + if [ "$CONTAINER_RUNTIME" == "docker" ]; then + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + else + echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" + sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + fi else - echo "in ds..............." + echo "in rs..............." 
/opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" fi From 8767e7e0977addfab91a7572915da53443ca867d Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 14 Jan 2021 19:23:27 -0800 Subject: [PATCH 015/175] changes --- kubernetes/linux/main.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 834446e77..39d742fe6 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -540,6 +540,7 @@ if [ ! -e "/etc/config/kube.conf" ]; then /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" fi + fi else echo "in rs..............." /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & From 06dea3369e6520c1c19643ad4fc16098f526d145 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 14 Jan 2021 19:38:12 -0800 Subject: [PATCH 016/175] changes --- .../conf/td-agent-bit-prom-side-car.conf | 2 +- .../conf/telegraf-prom-side-car.conf | 64 +++++++++---------- 2 files changed, 33 insertions(+), 33 deletions(-) diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf index 575981b70..4c7be6959 100644 --- a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -1,6 +1,6 @@ [SERVICE] #Default service flush interval is 15 seconds - ${SERVICE_FLUSH_INTERVAL} + Flush 15 HTTP_Server Off Daemon Off storage.path /var/opt/microsoft/docker-cimprov/state/flbstore/ diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf 
index 85ec8a89c..7a5e152db 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -726,45 +726,45 @@ #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] ##npm -# [[inputs.prometheus]] -# #name_prefix="container.azm.ms/" -# ## An array of urls to scrape metrics from. -# urls = $AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE +[[inputs.prometheus]] + #name_prefix="container.azm.ms/" + ## An array of urls to scrape metrics from. + urls = [] -# metric_version = 2 -# url_tag = "scrapeUrl" + #metric_version = 2 + url_tag = "scrapeUrl" -# ## An array of Kubernetes services to scrape metrics from. -# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + ## An array of Kubernetes services to scrape metrics from. + # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] -# ## Kubernetes config file to create client from. -# # kube_config = "/path/to/kubernetes.config" + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" -# ## Scrape Kubernetes pods for the following prometheus annotations: -# ## - prometheus.io/scrape: Enable scraping for this pod -# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to -# ## set this to `https` & most likely set the tls config. -# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. -# ## - prometheus.io/port: If port is not 9102 use this annotation -# # monitor_kubernetes_pods = true + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. 
+ ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" -# ## OR -# # bearer_token_string = "abc_123" + ## Use bearer token for authorization. ('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## OR + # bearer_token_string = "abc_123" -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# response_timeout = "15s" + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" -# ## Optional TLS Config -# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -# #tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# insecure_skip_verify = true -# #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -# #[inputs.prometheus.tagpass] -# # operation_type = ["create_container", "remove_container", "pull_image"] + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + #tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + insecure_skip_verify = true + #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] + #[inputs.prometheus.tagpass] + # operation_type = ["create_container", "remove_container", "pull_image"] # [[inputs.exec]] # ## Commands array From 00a90340007090b8ce04487e19b739fea5989cd0 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 14 Jan 2021 20:03:36 -0800 Subject: [PATCH 017/175] adding monitor kub pods --- 
build/linux/installer/conf/telegraf-prom-side-car.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 7a5e152db..2f6abed52 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -746,7 +746,7 @@ ## set this to `https` & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation - # monitor_kubernetes_pods = true + monitor_kubernetes_pods = true ## Use bearer token for authorization. ('bearer_token' takes priority) bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" From 3f430c9862ec2190fbf9a97a996be69475fbdda6 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 15 Jan 2021 13:13:56 -0800 Subject: [PATCH 018/175] move configmap parsing to sidecar telegraf --- .../conf/telegraf-prom-side-car.conf | 79 ++++++++++++---- .../scripts/tomlparser-prom-customconfig.rb | 94 ++++++++++++++++--- 2 files changed, 146 insertions(+), 27 deletions(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 2f6abed52..de29209f1 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -725,28 +725,32 @@ # insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -##npm +#Prometheus Custom Metrics [[inputs.prometheus]] - #name_prefix="container.azm.ms/" - ## An array of urls to scrape metrics from. - urls = [] - - #metric_version = 2 - url_tag = "scrapeUrl" - - ## An array of Kubernetes services to scrape metrics from. 
- # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - - ## Kubernetes config file to create client from. - # kube_config = "/path/to/kubernetes.config" + interval = "$AZMON_RS_PROM_INTERVAL" + ## An array of urls to scrape metrics from. + # urls = $AZMON_RS_PROM_URLS + + # ## An array of Kubernetes services to scrape metrics from. + # kubernetes_services = $AZMON_RS_PROM_K8S_SERVICES + ## Scrape Kubernetes pods for the following prometheus annotations: ## - prometheus.io/scrape: Enable scraping for this pod ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to ## set this to `https` & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation - monitor_kubernetes_pods = true + $AZMON_RS_PROM_MONITOR_PODS + + fieldpass = $AZMON_RS_PROM_FIELDPASS + fielddrop = $AZMON_RS_PROM_FIELDDROP + + metric_version = 2 + url_tag = "scrapeUrl" + + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" ## Use bearer token for authorization. ('bearer_token' takes priority) bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@ -763,8 +767,51 @@ ## Use TLS but skip chain & host verification insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] - #[inputs.prometheus.tagpass] - # operation_type = ["create_container", "remove_container", "pull_image"] + +$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER + + + +# ##npm +# [[inputs.prometheus]] +# #name_prefix="container.azm.ms/" +# ## An array of urls to scrape metrics from. +# urls = [] + +# #metric_version = 2 +# url_tag = "scrapeUrl" + +# ## An array of Kubernetes services to scrape metrics from. 
+# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" + +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to `https` & most likely set the tls config. +# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# monitor_kubernetes_pods = true + +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" +# ## OR +# # bearer_token_string = "abc_123" + +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# response_timeout = "15s" + +# ## Optional TLS Config +# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +# #tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = true +# #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] +# #[inputs.prometheus.tagpass] +# # operation_type = ["create_container", "remove_container", "pull_image"] # [[inputs.exec]] # ## Commands array diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 7aad580ee..195502a85 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -7,6 +7,7 @@ @promConfigMapMountPath = "/etc/config/settings/prometheus-data-collection-settings" @replicaset = "replicaset" @daemonset = "daemonset" +@promSideCar = "prometheus-sidecar" 
@configSchemaVersion = "" @defaultDsInterval = "1m" @defaultDsPromUrls = [] @@ -108,6 +109,7 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu def populateSettingValuesFromConfigMap(parsedConfig) # Checking to see if this is the daemonset or replicaset to parse config accordingly controller = ENV["CONTROLLER_TYPE"] + containerType = ENV["CONTAINER_TYPE"] if !controller.nil? if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? if controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? @@ -118,16 +120,16 @@ def populateSettingValuesFromConfigMap(parsedConfig) fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] urls = parsedConfig[:prometheus_data_collection_settings][:cluster][:urls] kubernetesServices = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_services] - monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] - monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + # monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] + # monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] # Check for the right datattypes to enforce right setting values if checkForType(interval, String) && checkForTypeArray(fieldPass, String) && checkForTypeArray(fieldDrop, String) && checkForTypeArray(kubernetesServices, String) && - checkForTypeArray(urls, String) && - (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby + checkForTypeArray(urls, String) + # (monitorKubernetesPods.nil? 
|| (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) # Checking for Boolean type, since 'Boolean' is not defined as a type in ruby puts "config::Successfully passed typecheck for config settings for replicaset" #if setting is nil assign default values interval = (interval.nil?) ? @defaultRsInterval : interval @@ -135,7 +137,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) fieldDrop = (fieldDrop.nil?) ? @defaultRsFieldDrop : fieldDrop kubernetesServices = (kubernetesServices.nil?) ? @defaultRsK8sServices : kubernetesServices urls = (urls.nil?) ? @defaultRsPromUrls : urls - monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultRsMonitorPods : monitorKubernetesPods + # monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultRsMonitorPods : monitorKubernetesPods file_name = "/opt/telegraf-test-rs.conf" # Copy the telegraf config file to a temp file to run telegraf in test mode with this config @@ -152,6 +154,77 @@ def populateSettingValuesFromConfigMap(parsedConfig) new_contents = new_contents.gsub("$AZMON_RS_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) new_contents = new_contents.gsub("$AZMON_RS_PROM_K8S_SERVICES", ((kubernetesServices.length > 0) ? ("[\"" + kubernetesServices.join("\",\"") + "\"]") : "[]")) + # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces + # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - + # - to use defaults in case of nil settings + # if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? 
&& checkForTypeArray(monitorKubernetesPodsNamespaces, String) + # new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) + # monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + # else + # new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) + # monitorKubernetesPodsNamespacesLength = 0 + # end + + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully substituted the placeholders in telegraf conf file for replicaset" + #Set environment variables for telemetry + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil? + file.write("export TELEMETRY_RS_PROM_INTERVAL=\"#{interval}\"\n") + #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_RS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") + file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") + file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") + file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") + # file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + # file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for replicaset" + end + else + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for replicaset, using defaults, please use right types for all settings") + end # end of type check condition + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for replicaset: #{errorStr}, using defaults") + setRsPromDefaults + puts "****************End Prometheus Config 
Processing********************" + end + elsif controller.casecmp(@daemonset) == 0 && containerType.casecmp(@promSideCar) && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? + #Get prometheus sidecar custom config settings for monitor kubernetes pods + begin + interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] + monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] + monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + + # Check for the right datattypes to enforce right setting values + if checkForType(interval, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby + puts "config::Successfully passed typecheck for config settings for replicaset" + #if setting is nil assign default values + interval = (interval.nil?) ? @defaultRsInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultRsFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? @defaultRsFieldDrop : fieldDrop + monitorKubernetesPods = (monitorKubernetesPods.nil?) ? 
@defaultRsMonitorPods : monitorKubernetesPods + + file_name = "/opt/telegraf-test-prom-side-car.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf", file_name) + + puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car" + #Replace the placeholder config values with values from custom config + text = File.read(file_name) + new_contents = text.gsub("$AZMON_RS_PROM_INTERVAL", interval) + fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDPASS", fieldPassSetting) + fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDDROP", fieldDropSetting) + # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - # - to use defaults in case of nil settings @@ -164,16 +237,14 @@ def populateSettingValuesFromConfigMap(parsedConfig) end File.open(file_name, "w") { |file| file.puts new_contents } - puts "config::Successfully substituted the placeholders in telegraf conf file for replicaset" + puts "config::Successfully substituted the placeholders in telegraf conf file for prometheus side car" #Set environment variables for telemetry - file = File.open("telemetry_prom_config_env_var", "w") + file = File.open("telemetry_prom_sidecar_config_env_var", "w") if !file.nil? 
file.write("export TELEMETRY_RS_PROM_INTERVAL=\"#{interval}\"\n") #Setting array lengths as environment variables for telemetry purposes file.write("export TELEMETRY_RS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") - file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") - file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") @@ -182,10 +253,11 @@ def populateSettingValuesFromConfigMap(parsedConfig) puts "config::Successfully created telemetry file for replicaset" end else - ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for replicaset, using defaults, please use right types for all settings") + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for prometheus side car, using defaults, please use right types for all settings") end # end of type check condition rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for replicaset: #{errorStr}, using defaults") + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for promethues side car: #{errorStr}, using defaults") + # look into this setRsPromDefaults puts "****************End Prometheus Config Processing********************" end From 021015243d406c6e65a74da0b5ecf2ec16d368ce Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 15 Jan 2021 14:06:37 -0800 Subject: [PATCH 019/175] more changes --- README.md | 1 + .../conf/telegraf-prom-side-car.conf | 10 ++-- build/linux/installer/conf/telegraf-rs.conf | 4 +- .../scripts/tomlparser-prom-customconfig.rb | 46 ++++++++++--------- kubernetes/linux/Dockerfile | 2 +- 
.../linux/defaultpromenvvariables-sidecar | 5 ++ kubernetes/linux/main.sh | 13 ++++-- 7 files changed, 49 insertions(+), 32 deletions(-) create mode 100644 kubernetes/linux/defaultpromenvvariables-sidecar diff --git a/README.md b/README.md index 3eec1f344..6159ddbfb 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,7 @@ The general directory structure is: │ │ ├── acrworkflows/ - acr work flows for the Linux Agent container image │ │ ├── defaultpromenvvariables - default environment variables for Prometheus scraping │ │ ├── defaultpromenvvariables-rs - cluster level default environment variables for Prometheus scraping +│ │ ├── defaultpromenvvariables-sidecar - cluster level default environment variables for Prometheus scraping in sidecar │ ├── windows/ - scripts to build the Docker image for Windows Agent │ │ ├── dockerbuild - script to build the code and docker imag, and publish docker image │ │ ├── acrworkflows/ - acr work flows for the Windows Agent container image diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index de29209f1..87f20e6ab 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -727,7 +727,7 @@ #Prometheus Custom Metrics [[inputs.prometheus]] - interval = "$AZMON_RS_PROM_INTERVAL" + interval = "$AZMON_SIDECAR_PROM_INTERVAL" ## An array of urls to scrape metrics from. # urls = $AZMON_RS_PROM_URLS @@ -741,10 +741,10 @@ ## set this to `https` & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
## - prometheus.io/port: If port is not 9102 use this annotation - $AZMON_RS_PROM_MONITOR_PODS + $AZMON_SIDECAR_PROM_MONITOR_PODS - fieldpass = $AZMON_RS_PROM_FIELDPASS - fielddrop = $AZMON_RS_PROM_FIELDDROP + fieldpass = $AZMON_SIDECAR_PROM_FIELDPASS + fielddrop = $AZMON_SIDECAR_PROM_FIELDDROP metric_version = 2 url_tag = "scrapeUrl" @@ -768,7 +768,7 @@ insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER +$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index d81196330..b63bbac22 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -554,7 +554,7 @@ ## set this to `https` & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
## - prometheus.io/port: If port is not 9102 use this annotation - $AZMON_RS_PROM_MONITOR_PODS + # $AZMON_RS_PROM_MONITOR_PODS fieldpass = $AZMON_RS_PROM_FIELDPASS fielddrop = $AZMON_RS_PROM_FIELDDROP @@ -581,7 +581,7 @@ insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER +# $AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER # [[inputs.exec]] # ## Commands array # interval = "15m" diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 195502a85..b62fe6244 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -18,7 +18,11 @@ @defaultRsFieldPass = [] @defaultRsFieldDrop = [] @defaultRsK8sServices = [] -@defaultRsMonitorPods = false +# @defaultRsMonitorPods = false +@defaultSidecarInterval = "1m" +@defaultSidecarFieldPass = [] +@defaultSidecarFieldDrop = [] +@defaultSidecarMonitorPods = false #Configurations to be used for the auto-generated input prometheus plugins for namespace filtering @metricVersion = 2 @@ -65,17 +69,17 @@ def checkForType(variable, varType) def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) begin - new_contents = new_contents.gsub("$AZMON_RS_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) - new_contents = new_contents.gsub("$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") rescue => errorStr - puts "Exception while replacing default pod monitor settings: #{errorStr}" + puts "Exception while replacing default pod monitor settings for sidecar: 
#{errorStr}" end return new_contents end def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) begin - new_contents = new_contents.gsub("$AZMON_RS_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_RS_PROM_MONITOR_PODS") + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_MONITOR_PODS") pluginConfigsWithNamespaces = "" monitorKubernetesPodsNamespaces.each do |namespace| if !namespace.nil? @@ -97,10 +101,10 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu end end end - new_contents = new_contents.gsub("$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces) return new_contents rescue => errorStr - puts "Exception while creating prometheus input plugins to filter namespaces: #{errorStr}, using defaults" + puts "Exception while creating prometheus input plugins to filter namespaces in sidecar: #{errorStr}, using defaults" replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) end end @@ -207,10 +211,10 @@ def populateSettingValuesFromConfigMap(parsedConfig) (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby puts "config::Successfully passed typecheck for config settings for replicaset" #if setting is nil assign default values - interval = (interval.nil?) ? @defaultRsInterval : interval - fieldPass = (fieldPass.nil?) ? @defaultRsFieldPass : fieldPass - fieldDrop = (fieldDrop.nil?) ? @defaultRsFieldDrop : fieldDrop - monitorKubernetesPods = (monitorKubernetesPods.nil?) ? 
@defaultRsMonitorPods : monitorKubernetesPods + interval = (interval.nil?) ? @defaultSidecarInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultSidecarFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? @defaultSidecarFieldDrop : fieldDrop + monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultSidecarMonitorPods : monitorKubernetesPods file_name = "/opt/telegraf-test-prom-side-car.conf" # Copy the telegraf config file to a temp file to run telegraf in test mode with this config @@ -219,11 +223,11 @@ def populateSettingValuesFromConfigMap(parsedConfig) puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car" #Replace the placeholder config values with values from custom config text = File.read(file_name) - new_contents = text.gsub("$AZMON_RS_PROM_INTERVAL", interval) + new_contents = text.gsub("$AZMON_SIDECAR_PROM_INTERVAL", interval) fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDPASS", fieldPassSetting) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDPASS", fieldPassSetting) fieldDropSetting = (fieldDrop.length > 0) ? 
("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDDROP", fieldDropSetting) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDDROP", fieldDropSetting) # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - @@ -239,14 +243,14 @@ def populateSettingValuesFromConfigMap(parsedConfig) File.open(file_name, "w") { |file| file.puts new_contents } puts "config::Successfully substituted the placeholders in telegraf conf file for prometheus side car" #Set environment variables for telemetry - file = File.open("telemetry_prom_sidecar_config_env_var", "w") + file = File.open("telemetry_prom_config_env_var", "w") if !file.nil? - file.write("export TELEMETRY_RS_PROM_INTERVAL=\"#{interval}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_INTERVAL=\"#{interval}\"\n") #Setting array lengths as environment variables for telemetry purposes - file.write("export TELEMETRY_RS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") - file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") - file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") - file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") # Close file after writing all environment variables file.close @@ -258,7 +262,7 @@ def 
populateSettingValuesFromConfigMap(parsedConfig) rescue => errorStr ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for promethues side car: #{errorStr}, using defaults") # look into this - setRsPromDefaults + #setRsPromDefaults puts "****************End Prometheus Config Processing********************" end elsif controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 34ab133da..d5e879cc1 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -17,7 +17,7 @@ ENV KUBE_CLIENT_BACKOFF_BASE 1 ENV KUBE_CLIENT_BACKOFF_DURATION 0 ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* -COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs mdsd.xml envmdsd $tmpdir/ +COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd $tmpdir/ WORKDIR ${tmpdir} # copy docker provider shell bundle to use the agent image diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar new file mode 100644 index 000000000..71f711e19 --- /dev/null +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -0,0 +1,5 @@ +export AZMON_SIDECAR_PROM_INTERVAL="1m" +export AZMON_SIDECAR_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" +export AZMON_SIDECAR_PROM_FIELDPASS="[]" +export AZMON_SIDECAR_PROM_FIELDDROP="[]" +export AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 39d742fe6..a2e32ccae 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -225,10 +225,17 @@ fi #Setting default 
environment variables to be used in any case of failure in the above steps if [ ! -e "/etc/config/kube.conf" ]; then - cat defaultpromenvvariables | while read line; do - echo $line >> ~/.bashrc + if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + cat defaultpromenvvariables-sidecar | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables-sidecar + else + cat defaultpromenvvariables | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables done - source defaultpromenvvariables else cat defaultpromenvvariables-rs | while read line; do echo $line >> ~/.bashrc From cd045aee09f64b83460f310905228faf5c942412 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 15 Jan 2021 15:56:27 -0800 Subject: [PATCH 020/175] bug fix in main.sh --- kubernetes/linux/main.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index a2e32ccae..7dfe6259e 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -235,7 +235,7 @@ if [ ! 
-e "/etc/config/kube.conf" ]; then echo $line >> ~/.bashrc done source defaultpromenvvariables - done + fi else cat defaultpromenvvariables-rs | while read line; do echo $line >> ~/.bashrc From a2e43dd6990555a7123e8cf2c9da3f0d46996388 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 15 Jan 2021 17:04:40 -0800 Subject: [PATCH 021/175] fixing bug --- build/linux/installer/scripts/tomlparser-prom-customconfig.rb | 2 +- kubernetes/omsagent.yaml | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index b62fe6244..48a3ce9f7 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -195,7 +195,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) setRsPromDefaults puts "****************End Prometheus Config Processing********************" end - elsif controller.casecmp(@daemonset) == 0 && containerType.casecmp(@promSideCar) && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? + elsif controller.casecmp(@daemonset) == 0 && containerType.casecmp(@promSideCar) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
#Get prometheus sidecar custom config settings for monitor kubernetes pods begin interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index aa844262f..ab307e010 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -461,6 +461,8 @@ spec: # value: "my_acs_cluster_name" - name: CONTAINER_TYPE value: "Prometheus-Sidecar" + - name: CONTROLLER_TYPE + value: "DaemonSet" - name: NODE_IP valueFrom: fieldRef: From c0e0424e384a0ebf8dec0453ca3ebca80b166b4e Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 15 Jan 2021 17:23:00 -0800 Subject: [PATCH 022/175] adding nil check and some fixes --- .vs/Docker-Provider/v16/.suo | Bin 0 -> 32768 bytes .vs/VSWorkspaceState.json | 6 ++++++ .vs/slnx.sqlite | Bin 0 -> 200704 bytes .../scripts/tomlparser-prom-customconfig.rb | 4 ++-- kubernetes/linux/main.sh | 9 +++++++++ 5 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 .vs/Docker-Provider/v16/.suo create mode 100644 .vs/VSWorkspaceState.json create mode 100644 .vs/slnx.sqlite diff --git a/.vs/Docker-Provider/v16/.suo b/.vs/Docker-Provider/v16/.suo new file mode 100644 index 0000000000000000000000000000000000000000..2df1c2fce1b495b95b9383d553ba04b50c2669c5 GIT binary patch literal 32768 zcmeHQ3y>Ved7cvzvIJNN%=>jZBq0m!-o78lA#}QjETlt+PDrv?5VO0pcdOgSX?IW3 zNq1O0lqf{lfO*I`P{yuML>N^NP}q=AO3ERa5bUy5E}Oyxf};>j+*k*DC?@0Sf@^8dm`p0lo-W47eJw1YiMJ0nGq% z#SU-)oB+~07gx%x+1kAqSK?r}7I7b7DWKJmE}niie``7Z8}I8`@csA2_*WYlZBUQ# z{|NA&8vkGdg5K+mazK{>e5Q~8M&zYTAm7df%m6Uo99;GB&oNi&_~&??%<)f}&p?^H z<_C`d>j3)rH`X&h?`P%y4)Ehnz+Hg50eb*{1R!q+e}ecufIrpJ?#1=1TKqm-?*}}f z-9L!yLx6|1`~BMW6rWGr-ZYi%f2!S2+;-O~GpB!Yb|@UEe^Ljej>a+z=gLjx{Bc-I z(&vBb{e#fMlduOAKvoOSeK-GZeiiyZ^+hxKcLywjR6fGId*D~~f7&!=>pu^9-=(69 zupBi`N&`|HHb)9qv-K8lQ>UKR(107x!DjIfA}@7o1~cvNQMGK5KmQl)pEK9yKQDCu zbtJy7Jurjct6Cg|N)q-D?E=~vX6sM=Z+qe5s!AnZJycb|1Ygo|^IB<93E00oq=0Js 
zrd7)~hEd277?48fnY79SNDssEl3^VyNXZ}`#2dW2VU6ke_B?&wKm9cH>#uYca(eBmowpA33$NIdhqtD-+%*t&V(z@gNh zlC}5Ep4+s(5dZhp zng4cJ@!1~qlUd2s6Vpv+m_}_~6^zl~-e!&+?NEz}WB=w`5WmV|Ev?=KKV(6CEg42!T>f0z$5>OK> zEB|ul1W{_~UzYef9ry_A)YcvVcSR5AMXhLqWl%qIzchCG?58!*-$p;7+4fH&ug-tU zd-l5&I4YyQ*;?zCp#n-WYOAx&h5nhPss;ELX}Hbe4pzWJ z-X>frKbWr+(9SFN+=*u{06i6Y9)El02S>GfQ^7J12zF}1_S`N0B!|r2HXZ< zs|Eof0O?k6jR5)p{eUQ73t#{c1H=IdfL=%Al@f6Xunj=_b_ZY>uoJKgFaj6_+z$8( zU^l?b{6CfUKS-Ex0NcjGgbS;pfHCBVRPp+RUzrI0o6+OLs*I!5zeQ>soBz2ZKs;d@ zt#V2sp_6Ax_EjN&{ur-wEHjwvf3z412l}5Vr_KhD4~28(rtZb<8(NYX{BBi>w>sZtAkR4Giuv$H zNEFV_QT(RIw1g}zX#WQ4bBzGA{HM&)OEAv2-Jpb80KH5p*u_1-n!q2m&@QCN>MWf* z3&Z{z@^h|aD20Cm zaIh8Dz#8V2e8>-uD(X*qmuUr+!rzXzrzMaC-&cVDN!$-&_A|Bv_3wEo>vDkE`qTAo zhW|$V>wrCp8ITfaQ02U+0Q5g~U7BT@#XsMGe=U0bI?Vj^IB@l081Wc-pwP<&j#9sk zsQu33Fq^TR+=gdAnRHOgVUvaQOltn*-(axfEMW1gJ zJfQ!fkjytyu>|#}@0~m~TYoR|KB*P52HMXSaJvt<1n#Vkzh2#~qh%~|X{w_Cdr;Oi zt-N;Bo_1XtS{r5D_Hk4cL98Wn{iz9Ys`F;+Z+3d|5yI1#y`jxnBi6+t+IT2Et zmbP{#GO6L_rL8Ly+vHSKPGovg$!*cFlDa{TD@bqdQBvt>I-`VF%jwKbX(g<_v9vHr zt!=54JiI2dN*P`+$A*+Ok#r^%P4vl$;dR4Zs=smxGjb>i9?PMO-V&~6o42`ptj_DB=^;7R zn;8m6lh(GKLn+0oBk7hCa-Wj2_Q;_DM64akU@SQtR}z_Z$z&`jr<&M8hT7HJ|LrsQ zza1N%yK(fjt-IHzU!VKLInRD4x$4Qs?!NTszb`rWz*#eo9iZu!)7RPG>rKDkK>x2E z-@NyU^Ji?j;wL}d{mw@lH?927>1TXz;Dt|Y^U@FOIsDuQh3IcBJX(tM`$_E_Y9BNX zHuQ|rqp`pkJ|V|%a{)pNiyu`F(Wb&8IZ7u}kD18(%e42W<`c0Ieh`+ctw@t#Gj>Ma2eChhKbT|7EiH! 
zS-OkmD4(v+<;I*7Ni&u+)$s;H%ToT(CPI& zP4)0zqxwj9!RzUOMct{MZRo`A9t(V+Jm=s<%#Obf)XjoieAb2->)HlrhbP2_H~H6& z+@qNN!f1zFytG&8izHSC`d*6#`oU>GO_!c-=xDLwZ&#b6tJCZ1v|E;YT~3SL-eI$} zb=uo3Hd~vm-EZsa>S}k6jtX>SQY&rGEQDLLIA9}cWZC>^QjHl@+h zIHolPI+CFwDneSWs3oqX`vZfiWIV2577V4WnH`ytkzKy!PM-sR7LUW`AbPvS=kwZt z-e&9adEH)rhhy}+bs;u1l!{qW%GMzzovDP{Kd%e5gUN8(x+R@VfT@04n`gPR&1-45 zw>d2?o6T=&bNGCgPDfXl&)@0ybhX(=57dRsU@|!n92$nwTnViZBkI08_Uq`fldA8Y zE-i2FUDslF_&D0^c2C&h@H;Gu(;K$9?4htlwt2l4B`AmFkSpX1g>ih+$Oy_dL^jrOGEG~b&zpF1yyk;>JsEoxZ_>{3WVf9876 zQmkyut`mja{gZ-KVZpB=ZN(jF&w$R1x?|_Wpptu3!=UR#e)|YB@5U0-*QdU?D|8g^^b1!vpBHwI|RnH$=p@n zEJ<8{TB6}Li=XTD>$D>B>tA^PvyqZ4ZpK`|e+90@&5--F@ZDl0e@=Upk6&OCtF9b* zZIA_nkT+bF<4!bk#u&K&n0PNkqFMZ3L|)x*L{I5HO~y>c8H}_lC%ID6(SpHi_Rb>Jr2+spL~qPWddp`ta?V(nsz&up7_^8m*v$ds#Q*8b>d) zf~z+0kQ|TTYDWrdAVy_v`RG7esit+6S zl^h*GRnt_^FNLh3@0R0S=!2x#ft(?5*Nzy+m%a~}`@!lhBaJ8L#NON5_DnDM;{qSa zqw#HCY=m=NsMc+6!w9F}oE$5Dl9ILXsnSzHtwL(ejI$86oWHDTZ*_ySKDF%5xx3kPVlfoJ9V1$7b5_&g`$Mt(70 zWjV~5Q85mb1SPl=@UEBI?}Ay@V;u zufhtEp@GxRh=8Azu#|3;xH@keL36!(sBu4$1S)GiTj{!}ke;Pyt03Cl13tS|Ugp|N zv~l;>bZogm3)iX1!E*KpcdS2Lmb$6&{gw6&i4#h-wLz#!(OMD~WjPz<0`RW35{O@6 zq|Ef-Z~S~yOajs2Tkp|q{5ppCMqzC=I6GQa%sAXhseKdA8Q8Xdn@GA z?+n+vPm%ZRRi!;1A+%2j?L?i4o=@Xg zy%cu$-Z7rC%+KX)mHDNUMbp;tzAy<<%y}<>b-7A z33`6TH?s0AZx?#0@F&yX#rc9Y5GT0|_Zm*Dc`75(rVW#}x{&AlBPoO^q za5tdN9i{v!V*cQ%(t6Jy#;>V~@50wsqEK6Ms(;j)%1J!#dmpU0)hGwVS>bZlLB7Ng z@&P-S@kQtQ?0Xu&Uq_q{n0gyYUqR(}B5JEsP}37$N%qcCHaImyJ4T!rkgNaC0@byZ zLmu>GYN_0ZFkTK7^SPV_I%_-Pyr7eFBzdflIC{>*>Kr2@mFrRZJ_MdLs;$0H4MRwD zwy06-E~f8uchYSYN`>F1r(O3NWJqGbDIPM)KoyB$y=x>NGGJw1?A%Fkb zNx1)SZ10`f{corC|7G|8t${XC?*AWA!WIks{t;Jpxz2Bv|MX}a1~8bz?+}>_{1QL$ z>Y+fEaY@`Udss^~hu=A_3_XGP=`W;*jx^}$rLLSM4{I6B;dd1+uA2H&2h#CR#`j-n zCCvwzt^cX-zhwJKzWlBA_g^rPTz&72fBlc|y!eNY{Kp~>{N~`Sf7@{Jf4yZ7K7H)S z;^WtS`p<0H91+WAj-?fTWw5>F(3uZ?`;MNj%YNP18tdD!^JRVrCJ-Eo#=?PEG%>Uz zpnf?f7DFn&Y7z-#aJ;Y&r*JL7XvX5h=UWnykzG7m%b=dI&HE8gY_`!6+Y;MImpq0! 
zup*k_q22){Wzj!DV;Zx+I?Re%=Yp8UCt(7Ev7tUxInb93q*EbWtnp;nYz-YAy^8#F z;{u*(M$IBPaBPw#5^hZ!F&+COs7-n%m|zW>Pc-~Ic4 z{i{#TdGz4mz5IOTxDLg%YQlpJ^|EAn`lr|lk6S-Hw({*&N50eX@w>m+zN&Tp>Ar9J z+GcJ~e)`nCv0ILO>s8j2!^jw_sXe}3TG@viXd;OZ+V;W0gTn;#$NdMAG|o4zhs`2< ziu^Sb^zm;duQ88$Vn5m(ABtt7mI#D`@u7+M-(m8lRW}PKrG!K(I6BQrBzJXkVG=NV z>Vvs}c;!)>s%6jEkw4m-yB-l_wnOL-#GWfBX2qe9*bP` zUi|l0-nq$}Jl4a$Y@9tAFVw%wrR}X#{l|O4{oQvxbNKMjKWW+Rzy5^9A+Ehni8KcDFQ|Nnk_fKUJc literal 0 HcmV?d00001 diff --git a/.vs/VSWorkspaceState.json b/.vs/VSWorkspaceState.json new file mode 100644 index 000000000..6b6114114 --- /dev/null +++ b/.vs/VSWorkspaceState.json @@ -0,0 +1,6 @@ +{ + "ExpandedNodes": [ + "" + ], + "PreviewInSolutionExplorer": false +} \ No newline at end of file diff --git a/.vs/slnx.sqlite b/.vs/slnx.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..0464e6e52586d2e40cc7da2eebeb8506e2c2038b GIT binary patch literal 200704 zcmeFa31AyX)i}PZU9Gf|cGo!^J8>KzIUL8465n>xG;tg!aec-|(lmBmSz6nwWl7PI z#BI`|^gd|m!BGk=EzkofZQ327EQV`-Gb;f3OT$&RHOfLh%0` z3D}X<ma0`WjNG(mrcrgsN|dbfX~J0FqOIn>oL z(pA~rzpLxq%0p+A$GGCCguj9QT>A0K^@lQx9j{FmE4GqOU~^A?Hv7GEdVc2A(=eA# zc^a0GaJ;f}ptl#_wSSyCRTgL?L>XFn^9nURj2y)^5w|^YuJJPkMYp8N?sJpLY=wM|}*TKq;(UF1fe#o}3tA7Nzf`6mEy&G9BgVFGj zfM1WAQyVmt1S?^vYj@XBSAS>MaOEK*MOP>uh|iC&PX$00wdo{KZ=-{|P!J%7&W>GO z8Bx2sdb`q51~XKE{rfwHI`?)At#4>`ocg?>kRI*y`eyWrT-IqX z%O-SX_Ok5g1@kfWx$HF1_Hs)fuf1&4L3-Sk?0!8OgN~Ln@}K1FwvyUf{<5v49U7U@>@%O_%r=-A zX=;H+gt?N=pykn)kz$y>PI{Q#eA-dgC^z-0XObAJY>UlS(&aX)EW06340@w_C_aJ1 zc6OXSZ~bLcnR75zWi}Zq4Ebi~jWC@N7P1pyTTbqjY$dZW^3A75K2tAbMdzk8jejO` z8+#w70%a#VO>U=ow^O`CvDK^=Cq!Ps72|KmuZWYySjFFjP{OH`VvvnW8=m!4V^qJ*h=cl_+$m?Cc}C>9tcg7 zKF#!P8X{)4r(8PHjEK5V6D-teYX>b-NnIU(khGlGVbIa^iI{#kp@%?g%X}0v+D%sE z`Vch!jgYc7xreuvl$G%(b&@CJjUh`$9*0<{smZaBe^^V~?+qrP9)gDo=RlRCAJQV&45-3QZAc2Ae3KA$tpdf*Q1PT%;NT48rzg+^C9gwWt)~f_u zKX#{geuk-qIJTMnxe;f9R9wSkQ&YQts>R(hS?_n(*ZbSu?M=E5$^Gq(&28;oh-=h! 
z-{zJEpS!-PrP1Bg+P2y4ZS?uwdP{S?zPY`*wMB>E$;Jj>OIxeEp{?HMZt}JG-QEUo zy}Qxh*4i{V0HMc3+Fz+wR}& zZkW`k8e7`^ZIe^2Zhw12eS@#r?{05x_5zU(M6Jz@?uM!6wwB4M$*Ik)Ks4oRZEmk` zcenYyz|&-t&pqjHYIfJRH2WJSeN!z{E%k1Fs=ZC0Y~SpjoNAv0Iq6OA$;svh_vW_E zn;SN7_O*LkTHI3&jsD5z$p&|;e{&lZrnhx-I}_$)i{8}eb5FJU^_F_Q0SfW!?k2s# z>-N?+Lg@_+^^^6j?eLcRGONgK(ny_x^$)j0I>U)jyrD)EId@}cRQJYp|3Gv!;>RDG z>o?UmZED!uP+wW!wxy|gOMT19Ho?kuUcL=dU#R=yo#D__U^)>cn-ardXbAZ9i_xG_ z%?NfT0zrQURjhhTAgX#2Q;dxr1-nD|Xl!gO5CY3_FsMhz#(a7- z4m(1y1)@*uuvg)Yhat2x9lFN|_4s0#Wi>NEr%M>|MyK^SloNt2rgSXY46yKB`eb7I zct>cyb8xhpGI;{gI3cYbj(el=M1;K?8eYiTu-Hy&M#&^%MJbLGRTh!!TtrHsT*OAf zG{ejcu^xT?(D*W5izqTRyTv>{IWn)ZTrsrCu&UD^~p7XAtn zC`h0nfr11I5-3QZAc2Ae3KA$tpdf*Q1PT%;NZ^zbSZot&`LtKbE^pjxEs=yqQ`nF` zrGs09Pv5^-0Ag756Q5e@=_P94;}-E|Ki^`j;I^0p&`*;)xl|O~G*gp*rOU|+WjtH@ zOTeg;m0N6qOww%8viPq$fJD+}IPI<46WZn4jP{`R(kWlB5Lu8wK>`H{6eLiPKtTcp z2^1tykU&8K1ql=+P>?`D0t-r@OxVgV;PfID3!C^H#|i6FSlws4O^6jjCz=Zua*z-( z{uF)ymLj1>&Ts<2Z}Ua%1?0O*`;+$H+DqUEFs%)0?b<5Mq5ik}ocf6Rkb1ouRmap$ zwLtJwJ`=Z~FFI6vgP(|Nr!;XL2D z+v#?yju#!zI6m%pi(}TY!?9HUu6&<-wOlXD_J{0O+Pmx)+Y`1sY$t37ZMASBzz?KP zO1DXuO6N)&t-rB;*?On-gtgmRDgIvkn)n{^4dSGDwpb%-!pp*Ug-3+jg;}9bXcEdT zf3ZAg`LyL0OWe|9S;hZ_{}KNve>b1xef(a2Juh=;*C>riTm>&n-oUY7B;tw8`|M9Y z{GBZu9bTz>afJsrK0_j+3C#q0&lYw{+QCU#p)&fyD z5uXUpW#v#hOJ906F{x+fz3M{xR?HJ75Sj{m;>Y4C=IR0x*9FinwM*;jTo8ko)`ET5*LhPb#l0Nys4Ow~?FTnFUiy=cyz zB8j;p-Y7V=WRh0sRCqq(UNkY82>FA$CpI%_NVnWii=r_Tu~|Ks5$(fhyVOlRfrv|y z9@UK0#gh`Z9HMFJRB(w`;>v-+KJP4uBB-kd{mKhiO^4@#jNRzuxDo4_khmU5mQj%? 
zAGa?Ni-+d|7s2b9=+~bwaod5ulZX&L7mMj#(P%i@8=judq+B*Gab>_~%ohzr;;}KK z#L_XWc)OHxbUJ9nEIUL~FkdKzgg^A-#%1SWxGWu^->A}X=pDresU&>iIjGoFaP_$o z*9hd`m=x5{2~Sd=FH+k*Mo}9NNZcNv8N%;$!OJQ%4KGCNop^|4QkU+hZ57Uc*wxqN zne(UeSvx9m?U2vl{?Qn8gh4&(gWdqYGBa2^!pOq@Sa)c*7wX3k?8URl)(uPCR?IEz zAJV=4`GvBp8KRAPV4$xj0KQ`AaYmVsWtQ8pPvV9l!+z)^2g1==cu%mx_UUus=z^J7 z4oci6mZm=(CyZp~S~oyDAm+L&5Su;qtM}7aM`G|YTj-Tq`y_50N=?1VnXO& z2~SbT+8*S^E=6^0GCH_=`X```MB}lEj-kF9qj8j;lU3cRn5u4xtB1k{AuY%f>WQ|N zIe&^|&0dLXCE}x^+n3Oz^PMx$$e`V%s4Di*F~Sfqp)wP!*-f+AKa7HiMZ7*e-C(Q+ zr=Bj_dlSTg(H#i_nR_N2gtIVW?m);r@14V;Wt~A^y^D5@C^Y+URClwU=JCz>Q#2Kw zlr@8f%su8zHtm$SK6t}OA~xfm(StA`;_JENVK+D-`rT1I5{{V@)OSeSzWfPblfWCD z&rQ;?L*fpTB*CD2DjJ3|{E=gBZ^-W^#X?!WfZOYf8n56E%wexHz3cYv5;u27S&%o( z&0zi6bf}92B07-ho&*l{hREjUf+>+5*k=83*T{9iakkspi+#(S-Ox*rhKH|zI zIy4fWU8Ch?be{4?V`>MlUN@D$=_M97NL(>z2dyg>Cw=n4)>|s-AyW>gqM!~<0u*WV zQ--m?=(OdVp%&5vFl6ZF=#ot^T4A&?IT`7ec|aq;2&NGj;l*y4P9eOvyR)l*xGNRB z5rgeg4+sq8MS>qJnqO2WaZ4aLFgI-kE!_a~E(Gl*3YZaES_{f1WZOBgt83suf7j5k zk@M2^l%CXd%%ERdLqq8RY|xgj!(8l=$7u4D9%lQ}wKRXvG#uG99fIM@NV9Sc2pvks z7;iit4NN9LL`H1&YC7g+XgIBg%689JzjV(JFRiZ5L(^iS>AreLbyXgk<}}S?zkC1v z)s=Z@noKlzedA-*6?tgdO*H*4etGq(JnD7*<}}e4fAYp3SC{9>sLe!knWd+4C2g_h zmp3Tr5~HGw4)?*p5%4CDp&vOY}PXR5h`XQd^fO5DG1y3@%3F zUF?M32RXyo=&(P1_b)3d-*(G#2k;7E;m~B*8$~@tCQ)VIe|ND=`!K63BbQ=3D$24n z9rnO#Nd)Q_ACYgOo6Ryr7Xj-BrG8$0k9v(duZGkKbwuq^ zJJi)`k@8FBbISXbw)qB{tyfqtwC=a=u&%Po;;+Q7i;s$* z5FZrp7H<)+6pxE>aZKzLo5V6v5PmLvUHF9XPT@KsCL9u4g;j#x@*B(dEuXPGWVzRJ zrR4%klVutIEB-nDoBYH4JNR4qi}(-hd+%D+9eAmd^{Wu#*Qx%YT%b?HG07sp~sRA zK{XN-KC=F#GdHY2xhu+rZDyiSgsjAIArG;a*9iyBv2!}i13sAeVR__3*A%7$Anj#q zg}w}0|D2oB$4pcwD}_!o6*V`Q2$FW8n8%sCK@}=2a1t7*-|)!2y-Mh^j4->(Nar9~ zALBEYfqDw}IwAo^L57kr8rBS3jA2L#p$#r)8x0p6HdqR;+$|g2Q15Zc{VGq%1J=S z=tY{anvW$y;TT%SBJ&p`T^`H8xi(=54`Y}p3(MtLI05T;mr%|d)5lU(sN!S3V~Kbm z2)SJ363Q%Wkmat$uE8G0O}|%|e#_W+qlwA+9$k-^zToP>X_vbdnKIT}#sUM@WV^6c z7~zI8*;ys*;fJ8vEoiqmfbAwnovdFX)ES|M4!L=S&<+LUTMhSMqfY6R+}jkPf=@l} 
z&v-f~<7tyaSk5TtQG|y2>A|w{+et>(2z~tNmXkZz3H$O9Wi25)T7<(u_DYwHJDP;K zGe|}%A+=Q8t_f8ZP}sqMItQQNao3Yq%xmKrL0kI*cAa5tC~RW1lndK=L-);Dah}~P z!a6g5^ebNGZ9|qa^P+7GR+eX-Bf@$zU*sz;X6rJcp3gOybYqV)EW^`b3Fjr_5eI$l zrXpc&iU31`D}`vl3hw8a**uiV$?K;FYd!Z3F(adh)r>@ zQs^R~3)v9|uuJ8)9rj}#8|H3LBv#K|h2xD8;>BLYmKP=XBH1gPGL%+kAdI=(EjVM4 zj)40uk@;iT4#_jwV2R8hT@3EABp|gT6UDKW2Bmgk4r7ZXVPxkeg8C16T$1{wB8zMp z?nd(T@?$YZv8DJ%9A7bts0y=K7F$#pdm|TOoun2T+?ALh^*EOCq+vaRE}Z!*|J_3H4>lF+S9&4IG7?lwS}h*n|&YzIw?$Olki3ML==xfi^Z{f zz6qygFg?^Ee>0K9F))PAqM?f>}?2@nuLdoXkDQr%ytAq}IVLMX`@^LELQZwq(Q=m%2C_sk2H(+O_0d9ZTH>*d@ zn@rq|WkS0V4I3*NaW^a#>WnzpkE8j$9tSwG_9iD29`(lIb8Dz_=q@*7%f|K!L(!!B zSX;}bnE@?LmtfOLE#6D9=@?IJD=9J`$G(-l*QIY1#`#XeC~z8EejjcSr-*ay_q|y8 zARN=a+%!&;kOZ{O-Q!N1Z^^8~W#4KZ`L-;o!UCBN?WOwXp1#PvulF-7xfS7 zZ`GHyVf)?Muk5#LKi7V&J*$1geyjcUu!n!CdQ>~#alPXTShpY59(AmDR5?~SmN;Av ztNa)FW%-v{hiX;+s{BFgQ@*L43)Y1_$`i_CnqT>d^04w=@F%!axlOqRyb7*TlG=y0 zuV{bJ-s)@z>p-1zjdrVcrL)|*%&9r;PTuiH$8WR;g`2e_%Ed}T32PTBD-@Svb^XQl zvUZ>Pg8D=CyI`yMn)*0+E__D)nEFBW0rg(>F7U0kM~P?o5-s)yBCRR=4^LDzZC zuRFi&e8l-_=N{({@uT9y>ag0U?g87zR<&8(q}GCsqg*Xf7pYF=H?Cj0UQk|Co>#uF zd>gDDpHc#fPZ?JZDEpKirAyhav@4B@Td7g1T;F#+N&YwAd;C$BkVdwju_c-rx zzR7t5*h?;P9&v`8Q{Yjt-`Vf@y5n)j=NzAKe9-Z8#}6IfcD&c|PRH$zTO47qnhZGh zIJP@l9h>C!a;5!A`xov1ZvUwLA^Uxf3C98Xi}FL@een)?M!rBk7i=kewd>_Jd9(ae z`9I~S}MMScVLXS_~6CP(B_xk$Floc;IqU)q0a|4;kV;$Ov=?MLll`?P(+ ze!xCp-(%lyZ?$i-*Vrq-2BX<+w!hk5w!LI~-u9gBDe&xg)b<(MM{Ez;?zP=%d$a9E z+f}wpZAWcku+dCt@30-P4cPYBw%b~5n`||<3R{Uyv)QD-N-s+`QMwAeLyk&eXqo2)TJHsKk~dp#v|eSs)Oyq!woY3otOu+E);-qk)>i8#YmK$S zT4L42m&E7A=ftPPC%`WB8S#EGDc&XCCf+1oBhHJ5#enD)4~qN5Zm~mb7wg4Zu}WMn zE)r#t7ycmpN_au|zVM9jHQ_Pgv%<%OhlTrvyM)_}|DYe9@=p?Y%M$*pHo;=yZ|6VE z@P`=wH-DA48Mor`x(BE;d>drhv9cK{4R#y$?)9_ z-^K7d7`~I?I~aaD!ME{mWB7K4-%9W;{A~o^%D;u-H!}=c3CQ7A{*46R%-@RQY2a_6 z=w=dn4Sy5EZ(#UFhHqf_^$cIn@O1<)<*#M<8iL38s|lXuuVVO0hOc1wa)vKs_$0$g zhF{06~pBWuVlE4;T718MSeL&r4+%( zk{GxQk%fnkA7u-_6ccby^Gg_B%{zk5cpniXNfp^AvrKqW?g&nENb6|4z|oDEc&_johaY)p4IBMZBN;1j8R^ 
z_+t!zl;Mvs{9%Sa#PGi{{6U64!0^KiKg95Z1izPiKNh=zdw`<%QS@Gl-a}~K&fU-O zeGK2r@I4H_o8fmc{7#1NX810K-@)*m1mDKp!SLG|ejCHLGyGO$V?B2pdAyZ-3&U?_ z_)QGIk>G2&TN%EE;hPD*hP#R3H!yr7!#6PedV;U!u4niKLF@YBmI8Wd)0*?}Sgun!W zaROrqOQQrHCNP4qC`@38z&Qeg1kMt8A%Ot`X9%1oa0=n1PM{y5<|A+tq3T6AegT0K z1fGv@Y#iaiF@(w?0?#AxAi|z=2|PgHega1c93gO+z##(n5jaTT0D=7k_7T{Nu%?H= za|rAva4&&-5L$O5T-Qb5E&@AoELCU)e2j$bAaFZ@XA`)Mz_SS4O5hd(+X-wVu$90T z0-FhJBCrwjZ$MCwU^9YE2s{Yf2sR?9L$CosErRt3Y7oc>)*^uQw!q1nhWRQgLTv>? zRTBl7w~&A$gt7x+(Mp6X${?2G7vt~J?Q@)nzm}Acrusbp3G%cQ;o>D2#uxKn zBCV09V;BE8J{9qglBvh4{U^=9;iSP|#+0I+KpQMItR#C;!T^E%wt!iH(tgFFJ;@2$ zowy>Y*1oIVseM4ZT05qNv`MW)tJdtW?*EPYJ@pH)j((SVmFiOm)m>_nTBEL1HRUDc zS>>b3HA+-D555IxRn{ny>ldykTo1rn_lRrM)d(xwa#xW{bbjCYAI=Ay?{Z%5JmB02 zD|sGv`=4<<=J=@NZpY1FHApyS9Al0_Shvsp_8Qx>wolsTY<;#JwrZPI`nB{4>7;Z}YLJRy4fsdv&#m9K zK5qS(^=+`;i&%TD&DJvUkK&84viq9&S@9uQ)4fT&UW|$th`Yr)aS{2V;B&BAyHQBM zPJgqo0)+o_%TtyQTJEyE8NMs%CEpc1#ea_f2!9jb#b3rx^9Mn4OT^Wdj9q#d9u={S zH}?9KLuFzspMSUCx|>iM4%g?c6?^#8ZSb$_a*8E9-QxEhUn17vZ__6K_yy}A7xRvK zzHNRO|0=|78Dtq7{VTZ3-QvNF_)~24ui!4L5&JT!^KJI8;7(SHof&l0c%0tuU%@41 zv6!P9{)@y4i)qLIHty}K#V-DooBk`f*HM|6xBZL4%S1QGiG>+E`9+};aT5faKCU3f z+@h5$#OBQKRFXMTN9H7Xw*SW`mWz#988Fwgirg6HOIDs6!`$aq=f*Inx$4{)<|R|gBIdTWXpy*@H#-IRE+({j+yZ*emBl4M3(ifV zLtM@=*8o*4hd{%%Y-y2L1&=B30N+KLSY}E22f(~fdJ}UI7#lY~X)9b}8E3c;$d%$Y z%ffyFppg(p4VM6utH8Qlq$L?Kbl(X)PditOd-zkk3-laVCT`*k*8tegX8x!3OU1fW zaLQv~rP$6d;4)CtL;6L^OMttLdz&U!K=FoaYR~@6=bCd=u>#64;y1a(}8gLfj4wh_)Ui=&MZ{ngLvwSrqgR$(;yl^3-NpJorOXCIj|ayHm|2CHFaI^`C7&j4v^`8U~n`j>#1GPc=a z!9vDAW20U5k^#-|zGI|nNC?|=_Z?j- z*78}q?xoRUvCarg?YoyACe=h+74BA+Mu=iU0v0bx|5j8o^cwr`npnk|H{eO6u>((r zt<)BL(JU#L?V=l-@I@CE!C;r(hCjqzNs^^D;t!5xKC2q3vz*O%-x2b%ncMN2&s?Ul zA-`_R262>6?X;ir1B~{Kpg5&SPrWI>uB}4cYFThw{#Ncyc5w+ucIFAMnOpO|1nEc% zZO#|XkT!X$?fIgB6J}dugPzO^bcbHF84tvh*)A^PVVB+_RDt?NHrf~ZHl(#u?4T4_ zjZCjIf8^0Zm^#&7{kmqNQ=50|ui@TMC2qGEyY6{5?62W&TqU-rD6;qLui~;Tx|BpVxmR z5DP~e=KOQ$BT&p&?`D0tE>aBv6n*K>`H{6eLiP 
zKtTcp3H)nHpc!s39IZK%Yar`eJZ+x(#wJg5LrZ(JfXU9}ipk7maF^d`&6!+ZnUSmo z?)Mw5J(JrUNwS8|e8r3vXSJKtTcp2^1tykU&8K z1ql=+P>?`D0tE>aBv6n5$F+!~HT)}H|BEEqD_;K#B-txo|63qgt^CT@|G59}(yBP^ z7uvVA$Fz@V@6>M8PH3~*L2VD51W;9||AGVx5-3QZAc2Ae3KA$tpdf*Q1PT%;NT48r zf&~60B(T^eaTcDJxa%x%!v4eG+4_NR9WR2b0(hC1E{0?HA!PjNnIgC-07EXKA^nxV zDuVj~Fysgg>HcZ@_}?f2NwDzTbuK*6?_wvyi^v&+M`ZkcjGX*; z)JBM|mnf|jW8&oSzl1=8ETX{6F)O?uul)(%vIcAbUTq1So`1iZP`lMd%2UeQlv!o7 zVsky~`nW3yXVtr$Uvl2%yvjM}>~yYmDsW@p2OUYr1&$q#YB;s;`|{`D?D%8yd2$_` z*Y_R!C+*kUXYIS}itT&0>ueF*F57D9_tICS_es}C=Sr)szqLMKy#&r)-)NP@Z;9^^ zr^I^UmqNhu2g@y%jr?!;ck`RMr(azK?C*1Md~=wW@fhGCeL8@b+>&b*4g}&e1AW8! z4x8{87fj_#AamFAht|h>ogBX}>CDWSSsF>r5G+#8C=Jg3OP z!d+iel@xbFfd`W{nFT^A6CM2>y$46SJBKF@3=H)Q4|a5RP3#)z>*(%(g=L>QGhkzF z)%GqBZpA@3p%jH{aDQgyKn9f+jk_R&vM!nd>o2ED4vEU0ZJm&)xtX_Psrjh$g(=+i zWzMFZkg^^wf`^pCP$Co$%<0{JxE&=h6@Y6sb3>fmb&}Q70jcWhXsQu!6pk9&4Hw7h zcw$l3BhXu|YjyyA72L#%r3@tEkwkoCKBA|80hMT7zTLseC2*rH2yd4@nV2R(j`Kf| zo6m-*#zqp=h4*zrqwNR=hv|iwu{=SL>}lD(4Uz$$n4>|%(qugX`Mq-2SwP;|nL)na z8x43T;p8Idf4lT4xY9dSTAnD#Mwi`N)1`KYX5eg^U2t|6_S{(3BarWs8@B*CQ~@%% zM~`=g=itN~-QOJ=i2C(tZU`jWC~s=Lrij)yg#*mO$vOUv2Oxb!u5C_}4n_l^ zxKULZ4?y}MxdJ|tGm4#<2*8a$6Yxhd5IiJrXas`VTA8#9CP{KtQq+(ueTs7r+Lku} zEo(zvbCLKw0g%o8^1gbA8W65k0uq8-v5@cItYl>zM9?`?mcDp%zX@ z09n3Mc5j9(S^MhOk3rpq{BWgrIOGjxJp%b#<>pO5ZnR=rmbn~Oyg3Mx-6F5`K(f`V z3FAX>C(;p}@NN1DWY^0TZXjdT*WvdQ8Uq8t)v{+J5Rf**^WCvdxWI1E8=vWbJ7tgP z+0SzPQh8OKks;3;6=GmHeq45MAo)SPkdw8HH;1!FeX}M>kmvZtwpF#K$}TRu*XPZS zyoV_}@*I!ZR@MM{Ss6$Lxr@c4*lGxp<8$(=b*C!q0=afA#FI9~^Sc6YZzxUrfPdnMHyVPuAnQ?%ms^*W0d+B4FAi0PM-AdXIljWW zas`C5syqs*;T~{?kIk7@swFq(}!%3drpRmhyMdWQr!`?eMdM?WfrI7WQu-R%f zAU>UxW84Sib`=O)TS;{oLIEwvpMX)yZ84|o( zZg=ID)g3wl^8$FJ%}$b2*~Jrg%4%?OlZC*&3fq(oH> z`amk>)btc;2N?-XVR9@XZ;&A!>y<{jj$U&L`XtAuSta(;;%~ zT)Dw!P7_B#bwl@z!Z-nq2HRU^h#c#an<}*_{kw`!E{)~dsL2|5B?zKXyzfUub zHOL!9$nDHJRjf|lD5R+mL<6w$A>9hO&3cq$)$%I1!)3uP5R>F89^$L2NEhhnKDT#p z5Hm7Q zQ{|PHTpZk7cZu_6*Bg}^ZS~p?twz>0#de?i8*NxiYPYE&`IGWu+qcxu!+HM4wNI(L z$lJnR0gUXEjqTFdKv9Gq*+b3N~ 
zXP@nmtHSYThZSzXt8hKw{I0#!S?xGvf1};zY_|_6Kh(Y^-)Fn^)man@sR|M(NT48r zf&~5rB!IUX_jv;$?j&AO4%!7yoTxV(!F$gQ{U(8tu!IF&F9L?p`8Y-zE6^B54)*Lp zEd^6Y@y2Jf)_xeT7dEm=&-1Ye-UgmE+lMjCpxvbtJt>5j$s1w7LOdJ?n-vx{w_4~) z$I$gcwuT)v^E!*X4JSea!@w}+a3Nk(JTx~F)%5^gPuw>P)+|_g#%A!AIgP(M zXZphaDf(;RXh_FjBjJQ^#*eo_<1*5RKl;EXFo{1<-Snbh@CU5!!6+ZPAX_jKWx`e- zR^`(=*tDo@gYzi-p~1fM@d|9?rEz(K+>YZN&pAtuF}%;7nwudZT{eRy0TUd;C~6|k zjymrQWOvXkyK|9!c+M8eT!LOo$SQ*#|L*EMYyvEe*z zV(1Jw-Dl=>FvTetE|Gx&=*c!{=6JPszAXRv6&Tw!*x2%zAvCEL-GeBCcHf<0c$9Ojbn?@?uyJ-T1QER>F;DZtIZ3 zWT+!{zYS)J+AIoU%3F`Egjw!t%+zqZdTo|%Zmo%SjhS{eN} z{6hJu@&n~N%D0p!l`kouS3V8541Pd)pK_1#4&|-Nt;!9`)nKbQ4z`Jia-rf^&R5P= zhLm1qx3WXoqBJQU^>ymJ8dYaizdEk&R|nL+YKOW-ZB*;j)$p=~zk&n`5-3QZAc2Ae z3KA$tpdf*Q1PT%;NMJz;kek`B=S~uj>mvjv2#gaLBQQ$fVFDurh6xN2I7eWRz*zz> zM0invz!?Ik37jHOC(uuzkHAR+y#!u>@Yn=_=My+i;24322t1F#g9M&S-~oh3_Y*iu z;0S@k1P&3nkHA3!(LQM9*vlswejRlzzm%d&C^|vWaf&Xc=pu^dDLO{cQG%}9Nl^zy zJFvL;b^^~Pa2tVV5xAAWEd;g`*hXM0fh`0!6WByxBY_PB))Tmyz)b{t2y_#;k-$0v zHxO7$;CcdU2wX?tS_0P)xSGIf0;>qDB(Q?ORRoq3xRSs!0#^{YoWN27OAscO5m-#% zQUaF{xR}621QsETX#}bSDg=^G0Al15fEf7%AVxj`h>=eKVm3k}5lB7&h>;HfVgdoD`B4S>I?~ zCcXtM4L1lj%heVq|9Uu9;MjtlCvQa332tS)g*$13?++~8bso1px7w|>HvG7#9a(^I}@CGY~;|YHtJl3$ezI_brWO_6b4aD@ZzG1StGR91fV><(( zbXrg6@QgPa@vzAK;5rOmBc3>M4UUOP5!~2-jpY14hMPVwxy{kDQwI+gJ;laxaF#IU#PzkJRn0_+Ux z;N|N}OCf0i4@6`m@!E+YO9uIv0C&gNmJOa=q78R+>_(CJeZavYp0 zk%xu5!rxpU;A8Gh1mkqW20e|_&EW(%YGUcHVQXZicf?})++=VbcZpLke-U_tV)6QG z*9x!#=;(vKv!?oae#{%eD69i|sv3$NX#`@0Vi(L$E@yK5fSBw0;qFmtf!?7Xw{MZYtK3I?E3Yiapce`CU9Dd`Xe>wy3bYQ z>~Y*6UuXZeO_25pH^H~~LzZ*$F5wG-1qtMpfcS990XLuA2V;VMYe*^l?uHfHjZ|_Ey%V0EoBmkd%n^PM!lmshbsB3rEP*;Cv*Kp+_BL$f10`d9r^{D{J zqBflb>TPsz7YYKz(Alx8DT&9y0pv*cC~@Amc3#6mFQ6T=)x~x5Os!wiMdcNo#LoXZgSd6CZJ!z7xK{k z38y}92;AX_!EYj$b=u3a30;}JEIWF^d`x{VI}NnG+|tKuFB^4`9(Uz9>2cYqvY(xq zJuW*M$~Sskc9gl#oI#Ic6`GIdoR*W(<4C!uZ!mvbdojAT`K65Zp23Ylm}q)$%7b(h`kgF~11}MgV2Z z!K!t%zkA%ZOty26K!NLRrolNK&zEC7UzypXBVe68M7lvaO^Y8ky1T 
zGoR$lHkcV{YJo8dYIjO+ELahH}$G#k{GLOi_KQjK7qq_cAPzL{bf^`b1+q9HW?}m`DW*hFr5(=vJ+riPVSU!C9^Q{&8J5`Q!ivi z=cY7`eH&#-hFhSMoe!(2YW*Iu?)q=R&4 zdXP3rE1OSiW(UcRHg%Egs8`ojD9`Cueg>Mzf_0D{+{rkG`V{`X3YFk*8{xaj_x|IR zJG*n$z+P`GJ}?>6qepa5i_#-6(-te)=Qx!}AoW<=C|nqZ+$TRUixO5h9pgQVre4ug)S!x0^a6S(!3^(bVt zo2 zlk@)Wt^<|Xah2Yf;yBPfvKRVI=jhOI_x`S2nvP+ns$u^ch9ySl2ys0yYunxa%8ubm zIMpvh2k-W8tX!xABR#VwQW83YV-g%DGNR)%mAi)q`f^H5_gv6N2Kv*Fl*{xpaz5zE z!(JxI?qSa>XE!Gc!v68&l{I5Em4F5fcM7betk+7XrSF$jbx1v!rs?kQhgIk~u{lh8M$B{d8cX24hPzF z9)_65YcD^xm?*H<)Md@l#wUWbU*S`6<`|jIjpU!Tgd+1iM!dQaGo5*g)$|n0CaiSh zdKJ^`-?;KNlx6eVy@o5P^fYjKJ^odwHEgmO={h&la76b>^UXpFSZeS?X*ja(3{v32 zpCDGwBJ~T~gJF0~w~bWv(uF!piXwkjdAmz04cH!1zbON-U}z{h z9n-@OTYJo0R1#(=NqqPHR+KVe_LKEx)FrC419i!IfHJ^P{l&@v1Bxf1?U!>}&k% zZGPbWfBThtIqi?yZ?yl?eyn{DzW#qwdkpRX{Dk&z@a_LS+MU{4w41bR;XD1~T0#qH z)7k}a3c!%oqwUhRX)SODz?`D0tE>aBv6n*K>`H{6eLiP!2f?GKtGe;PthnvBNPo&G(^!piUuhf zps1gsK8kuN>Y?Zyin=MKUabrh|oXbnZHDXONZ zilR!2Dkxe-Q8`5`DJr9A1x3p#Dy67|qGc2nQ?!(#B@`{DXc0w46loNx6e$$BC~{Kd zph%|3P7%HQ?>gi1KN=`fWL*De6)ZA$9a;aY!<_a%+RNInwHLLY!U=%i)1J}3p*^8} z3C;lgcd!F|SbJD|ADjYsxAu0h1>B2f!xKr|s1` zwX?N$tqJS`8??1rrB;peipHrWO69S)99|s%3XVp)_ z8G#>AA5iZDJHZ`rO5m;Pjq0`P6<{m4SUsX1R)gxaIti!woeRts{t6N(NT48rf&>Z@ zC`h0nfr11I5-3QZAc2Ae{x?WK6gYlAf>8t`2!;_1A=rmt5WxU~egu68dJ*&>I0r#D zg1rd#AlQwd3&Ab~od|X!=s>Uo!FB{^BiM%EECgE-Y(dbDpbbGQf))hL2$~QyB4|KR zk6<%`O$a;)+z2)zs6(&;K`nyy2x<_lL$DUX8U(8mR3oTDP>G-d!72ph2v#B}L$CtD zas;IaN)RkVP>f(Hf+YwRBUpr>2!Vz`MW7&XA#fsaAdnH*5!et&2&@Q11Oft!AaFAJ z|694oIPEsA+um+_9)AAc{7b&yVs&yBest~0XXm+E?mA0LlW%iNqp#h)S#PL!H?{cM z+}?)fNq19|KH1hd+0;Dc^LB%eX%NmIg9A|Xi_v?o8c+Lx8>8W_4!t8f9jo3_d5C(| zRc{J~ec{klV7i*QfK_iwgks()-JK4ABkg>%y5AHM3gg`yTk2ak*KY*SfPnr=rQHYv!C1qzGY+x?vmd+1Yb75 zr{!DmP@MtzuzqmR^s{lIKAmg^t_Rieexk_y5wEG}sNqGsWvse$%b0PF2R)~8%s8`a zECR<*#m3CXqR?Y}JROmU2fX;k#^8_+INN0m@*B%KKf{!Y9qR<=c8pc0dHcumK+l>4 zpZ$Oj4pxCP8voaD^G{_24mN9S@W72gb8wi$5k2i=gHPd}i*?!>&~|A|cOOiY1m{XeAzFfqU>1VMV!b`1Jxh<$7LPZWunp?^X*%_ElX 
zxj8tli*pZ(GR}?l9GMw)e_PX(zuD_<(sh^_o7&pl?S8-CUGMidwMBG8AGk#&V%`29| zY+IOZ*`QvSZBvK*jpdo={<*U)%&_bGVK<%5us9dKx*67dR-g{&`$2wNyCL)IEX~c6 zt*ySMdUtzkGtAhHjXs#Mo2T5(dRu+-RDE+pb4vrc)Dus%9){zVZ@C`h0nfr11I z5-3QZAc2Ae3KIA~BmwI2Z#{+ozvBE8*CWnyw*Tk4Q~i?qVb@x9PQ6;aQz>_SPyMd# zQ?`3uPFKWrh0>({N(m~DDercDSh-v|$2H_?v;EZhW9Jjj55Y}=*Eo=|US+BGXS!=Dl_^kLr@iuYR^#}3n1^%lMHQ{C9yTT*F?ZT|kCo~D=mcLk@vwYfe zizRO9v8>|%!vBbWl)sx#@;-hqzaFe9FOiC~EvevT$s0HpggxoVywCph!{6Dm;So!D zRD!E0WC^aOOB|bsM&>+zIB~4hh+cJA;_6b-{>bcfX8iJq#8nybnNb{P`F0INfv15! z9raFm(2JnR$haaTac)zr8@ALk2v*NYTr(2*-C(o{#od#Ecn(onP=dQGWGS48PlVyB znv5JuXC)3_dNwhsXU43$kiHeWwj)3;Av+dNF;^FmIJjs+n$f+%_{_vy*sljCh;IQo z%inV$Tttx~u9=azZ7E{7Q!EeG!-?(#l5g?63t|)c1q$pARq5VbM6#L z%pLJY;k?mI(h8jl&j-dqIMj2JTovSr%}g57E%(!+Xv{=xRu5)G`!L!rb<=B0$ex!T z)r{1|lM=TaqG{?>aEVvq%7MW??<}}k1a;M*UwMJVk(v(Afj8S1myJtY8SokNMFWv| zY|JRJbPOxrE~Rc|IcUTzJ492kds~!L_<0yEOGoGhnW^yNgH#eea#>s|*j3EA8-W}w zjzRsL@MNqroCw89ZTA>i0*$-}XofZ(kT~)KPlAo&p|9{}v;kx!`{-ZaeBdW{m+q&H z7W`Fq^>umX{E#7ySvx9m?U3Q%{?Qoph(SH-gKh!8jB**-2qO#oW8Ival?>dKXjG@;pi;9Cmi_N zr_Y6>3uaz9C~=!un*MN{Fp`;T-2m;0nCq@UZ1&Wz-cMg0iQ&GRg zF^tgCbud3cs3#JL=#YyZGh#Y+NZeuIF9J?z?kVsqbNl>}V{UMAaYqud8FxJF_66Kt zU)12-9hk$;W_rc#+a+%9jItp7=Vq|}Y&z5#%y|+x)EknTp9`iWbzqx~OX3KXpENu* z74QYTK@VI)H34G+!I2p_y_fv*(b>osm;eVlzsH>My0hpwkeM&72s7R2R$!nchXG3X z>Wg2sC1WObhojz@S;UR)5;qE^vZ34^hB}$igHT!6%(0TZQJ;IZEhABLo5US7B}&uI z#@u>n8k&LI7lg?Ngl($M4Xw0$PwR2_QKKig1E7CEo0!$-&9t>Gbh1WT)Gu(xh6$fG zq}d~8(z<3kgdwTd3%NxiZkV|Jv3$gpO>{&gP1|UB8GWX_(U{ty@w=(~O)s&yLE?&` z!N;N78BM*S9x~-%Dx#N%!3G&#+X6#?K~TOKY9UPkBL%p)rK3wW!61dv#++oNTjl|c z03(=2V1yUDVLFBI-tNw>{^71v@J0-_OFbYkke3m>s7~URKyYAg+6Y>@fp+G-L;*8G zOKY(+!@xbTt83r@9GyID#4KG;=}Aqey78cN4qgVyy`uE?w%lxkVp0_Zh#oLOY0 zjCqcvvMgOoS@2AQCO92}fz2RZxd!S5n8O%vJRS{9CP0ct?2;m=78uB|nSeq?^DkXY z2PbSmQE;(C@{iqB({oF>*aDK0?NTI|fYzGIW!VZ;Q&I{~DkJ5GTu~K-heMNLaM24a zcRY+ z#!^Cot4OcSES9c#)G=-^PE2No-3$x>KG>vJRdsjWRx@0L;hhsWy zI@9-}1!$|d_opT;kkiA8n0MvgC+`5(_v36F_#Xur-EVekqTZcLW9Km 
zkNN~7hed+KErPJ=u*C>k?4)Gg*_gu!%fl@dAgm65hj~(B_6E0c4LR+HMge&V_Tg09 zpuxr2471?u*Q3FC1(M2t8V3W?=V(yoTc^l&q~1Cet>hNwcEAc#XEoIeA0Qh^9rZWxdki) zzjS=falhjV$9axg`LFV~56N%NQb6cK-g+c|y z)4qmrpEXJ#v1acwN+rr{F%(cM;{^dr=|^iyE@ZH?7k>u_M!U22T~qtuUcN!T5awDDMFnygZT6 zH1~dkVtEo2AABQN!TaZQm~7K=6-iKfFf}zl%;%$4?5v4~16Go#H;2AD3F=LKR@T{>)+a%&8Aual zu<8!OsvF-Rlab9yP;~mzX$&fN0z~g-6_A;vISIF5x+G6@RO zT1W?Gstjq<=*lFhM{bA)GebP!gFEz0)yZ-QGYd$Tt&7Hby3n&*t%Fz3)#9@^g^~S8l7Y7qQMK9v}o*^ zo3to>LlQLTeb|BwB?eZ^G5F{x9)r~k+8CkJ!rGDz5j0V4613~>Fa#}_hE@3932vpb1!QkMl^vb*`m%`D zCqaA10WwA8*CWC3JicreX-#r9te7)My*MAh)RaZJDOn9Blf0Cvl^vNiQ}17utO8}) zoWchm++@EXS0%j+_Hi59kS;DGpSAoV!x%!F1e;Bq;SbgVLg?wED7ruEYgEZc)EN_ zCta5Wb$=^rqWNdo^#*S%lc4isV}r_~hFLHlbGI7NRY_QHn1x82WCl%5av80}KnPka zSP~||kUoJ*5iFZCC_TwyiR*^KI2mH4T^%n0s;$Jnr3Yh7Kzw($l`|FIC0b)hwm+;L`6#FwKBPg z3B|M(!d+t&RhEPWD2jdxDjV7~_Lg*PZ4%bN*rsT#o2o?St%o%2x+K`-+S9Z!Dds1v zNGeo~VVT1IO;)ihk}ld<2BTq^dgtJNbna?{Y2aLvy-KNw6yPWu^1axoKKs1U08vdYn!N@o+d8 zgDpckG39KE&|vLJZW-A%8(~V&NatWzGU6j*dFC~r@$7pWE6o(tArrgSoQr1Wu**!HyRvw|26K2~aD-i6~ zxH(IiDNj&a-%KQN3_mg^Izl?SGEOrvY#(gz&6GiD4BKbszAwu`~z30b=Nom$Y*ZuiPt_^1bm5OeQC9RIpZAZx9?loc;a6s+&IE&*}JuNR?%3Z<%P7^D5 zKJEDsTPoI)VwpJ+87P{r6gz-u;jQ7KS}+NMeO6>14-XhWKAX=zIXAcK>?K~ zinxJ3l|^uUqJlnA6!ksz>HqsX=bm%#ow<{?&^r11NtJf)J->6#@BYsA+YidS4#Yd4 zx8Zeq=je6xuBo{rE|w6~M~$gOV^{oOqdC`=SCzNQ^__VD1s!bAf8^iAniU!lG)tPl zUn&1&wB>!;3rUmqE9D=f^b%Ed9J`lfy~1I8@ko6d)@XvM;25=1^+mSgs*ZMId|A`v*o2v&c>lMy&s1UAmNVYWt^k}Y(xFt>Ui zs_-I?St6!S!hp-k-o>cKS=+32rE02|l~ysvw~jHY$sORW*uomCPGcK#z%)e?RwpV6$W@+cj(;Tw_z8=m2L3{Ri7zUQ(lD^b(K9srXRtWlPRowotB^r*CCr z!uuXX7o0VY!n zWA+PIESkeGg?;VOq+I2enR*rIW(iHXGMRGKH2oC!lbL4EJhhTdH*+^DKbRyd_I~U} zs;-9Ez9EX(aSKu#$_s2aZRtq#nrowz&HQ|-0_E1=V*1#&rmikFRoSn2hq_zcGa5UZ zhpZT+*6&Ch#4>tn2F^`Sp2nDFU-^7!x#IU!Rr1fT>`G01^z|MOvgN>1!Bn+1rm~rK zUD?1ywo2iRBA`eu%yiP!!3y9 z5d@C&9ekmvj?A zq`+|m5Rc?S@ape6D$b~V&~xF#BVJl<2@ljtA|Vo>=n4xhDZV>QZ{><#ajRuBZhS6D^9%(7O{9DTN4o0s`0 
zPhz^ZH1&*DpX0l|s}RXfHy5eV8nk1i=K{z{$3v)~@0UuSS~D&Yvpb_4}R;DWG=H!0xbcGD?|VtT*|^r|^nc%2X-qY|fnRuP1D_Av5qNiCXJCQnQtf#^pM)%z+!3stiGr59(g;~5O8Vlj5Rhbeh5@o{gV9t9hX6V(bI;bDs zvawJOcu0=b|aFUB=Igsv)%+HlKchVC>^_Ps*_O__XzFfn#Hk0v1-CxfbSe)pD4`00 zfu*SfMvc~9@S!%S``O}`18Y~*Sc;STD)ikR=jdAj5RF`Pu(kO~?^0!~US@rdaO7M) zPv2pCJDB_Cs!941kH=KP!(G~xPe8^i)vK5w;4oe&*R$PY5R-*+9UB`HBF9jnTsvMb zgIa-EL@2z(G*2kk@M`THa1g~@=iw`hJ6k*Xkj`ZXC$te%EQi}FpTig&vs{vyLP)aL z&#meQ+5R>NEbp?Eoy8^?7mOQWhvJ2^8Qsi6DG2bg5_|hnk!K=B9v~cSmhYcT?F2V8 z=5NL*yMe7hcH~*dGub~N()k!0DHq}%o;4(157I4RAbT z=`F7v<=bqGI})5{A;g;1!%Xeww2#3qFo`K+oUfVLOu1^LOkctF2%&*L z$P`=bm8JUbVe^;2WPl{zPPkwMv=h8N9^TX)(Z_o{3`}`m`2pJvXkD_&Gh#n$xyBh+ z(-P80Qx)0|R7Kefk;Ew@K&rtu+L&O}}dQu(>^JKsMe&@_75 z`kQ;&!4CW~@KYt=0SJ?@3A7`gY9zCUDf4j!UzT;Dm(4X2Zo$q%*#N~YDIEBC9AaYx z6#?*ryEz-9e*40gu~g?9b+-LZc~RB;Q}s2f%IBWR&zJ%#i>GQc>X`bA14yqR7k?k+ z(@Z*Xdg~|;vt^mrpsT!(DL>39Ypz5xwj5xmg1IEzL%rHd^;(ZdY}H)Oa`B>5USS_F zC>-&BFDpM3xI%(yl*^Lo*V(DDge__FP~rS;TQ^fIj#Vp|aNsDjYsXpr@^BRFx#)yO z9@-m(k(f#jK7$ssX<;_h$lSqr!6LT0L;edFE7rc)xb|i%Dg+hnYjUKdN&3zc1K8ea_o!wDmpgz9a)LP9` z&vf!K_Cwa+}Yu`!#c z4etBLGh2R(Dxr-W!`C3>}=&UF|w|m3gHvxP3wC;ifVCIE&ueg$w15X_Q8>J?}Ta2 z%uzTBZv1Obx3TwmeSVeC|d$Nyt$7ukFm2@;Pt`!eu zOwhuQ8BES29WT?pWQB392Z3C4_nz@Gmb<9o*azVXH# z{?R_&`&;k#j0=r(jMKH<+CnYt`J?9to=3cC&j&mg8s)~wz#jrX^FAK z?;%8Gm_8A=hc&oPkO2Z_hdo}up^aA==NO>|Old7!xSTr~jv!{4WRiqrIcKI;BMQxJ zLvS*%(DP%wl)lzgPMkeKTOvy2ZbSP@iLKdeUPnlXvxwS4!W5c369-3QGi@Iy8WZ@> zpfYay%`w*&kJlDiWtK!Vod&|)ayV_8RymjfZ|X>`gqynryD(+CRs|DIe%sAt0>B}c zSM5G&hPM7FiuH*>t;l3n{pL@M5-unmiMJZjx=PsUASN<>yRegR-TB09R;XT4Fa*zRw`x))|GILdyyv4PAU?1xDGb(Zg#BwqcvC%oFCZt7PSjcf(jYgUgps*Q!}@zJT&& zG?VS>((7ri=GT>8$M$Wm3C&iNUOPrB6IG>_^cosnoCxt`j!*iH)Rk~Us>*T4`XAZ@ zvx;q!JoBpf3r%j*RZ4lLM5{B;X|bh|bd}{4_0~LXr#qDBOBhm>KaS8!96n8#IA1JZ zdNG@sS>8*^Ct_N$%~45tF6;TLM0C5`(zDqwo@Prw*YD^Lbe83J^ijS-b39ME^5uv& zO{GnO4abDB+5-7xs?fXpQ}jVOJ~j`H(aP0qpT_un?Huw?Aq5JtIb~p+A{Y*s0V2p$*MV?FAvPtHrtBxxV8%wPrD1w$P_tk8sC2#X|jQmB>86EkYrYsK3yso@JtcJq_=w6x{RV)lXr^_M0>$M*$=L 
z_WMC?Jcm=BkJ8+>duDxbytYK81Ig0!F^zchU+$eq=tyW^vPe%DU9nMZilpn=zPy%#b z(E5}9#Yx)UD#4))5-fBJgRxuxbDad<%z?2sN#P^$`*Q7^W43xL|3$OZ;nMRh^9_l~ z%oR#`ijFnQX~xB~=6h^7rSDCaQvhPXzst^jSBIx>Fh;`7+lj8<3pBON)@Et7`8GP? zQlLJ^SW(vrBUovNsK|!S;o$V2LG823HR^J2cuAm5zgfS+^GB^&J5?JSdoA{K?C#j5 zu^pZ#W79qFiT=~m7=78Z0+#-3J;7*?`g(K+^6T9dT^=19c|+}wd^_?$3UpcM|U=@trQME zkrzVf^6n*sB+4$@+&J;>G2wF5p|Z8RR%yKgN}hAa zq!9hs60LpEnnDJ&X*EAex(SYx`XM^Ska8u3D;tU5;$!nZIxINEU>d>CSRZGp19O7% zKTYhI*_aI77ic--@IOU-H~D>#ii8p&u$dr=SB}p^bS{a4z3?@Ld$25gkTe0* zagOd>f;>PV<}7d;h9t=Swn3R^`>m1G=JY~Qzv=y(q7%PDhU66ljQH1zeF=3uNp8Z* zu8yPaSB5b=;;iqX?blWW{>6-0zMJ?@S&|z~{c$=Zt@36OKz$dfBjkI_49TO3KSl(! zN}XZSAEli>w@?P4Ft+I;o%-^%JiJBx^Bs)AMX05su!j z%ahh{^F(bG1IXp7H_=L^DJ0iL zrlJpH^ffxM;ct_WAv(&hX=T^7cA75N>qV$8b-0S=HOeX}zv6Zguudf_X`-N^MysX9 zrc@F+JSfqW@&--&JjKNC{+6bnvz)5^H~Vl$t1KA%H?*?=yqs$cRNZ)rpWVQq?ZbZZJ}lZ2xTTi()5=G(+}+kIpX(@b@G!X(=!5#r95M;(#+vryU^vHHF=6s_3 zlE#*SC_kW&5gvfXtBS!kFdENV?4K@fFw1CqTbsHiPdI&GmJ_)xFy(i~Ulr<#TE%%f zKUlAXshFHz=)e=drR{ww9WL~i$))>g7SV~21ZIX}EWiOQ2o zp{>Gty3BEmHPD7&7BZ;|x_jI_Oti0Ycn-DL`VpxSSI^OQh{}VeLtHga+bscGWV%E* z_rjI57n=x`b2%{Az2y};_Xh={CG$htdvOB0I<_pKqt;;}GQC%5bS_9%OKZa>bSZIH3d9W-nI{;vAHi-9u14icUVvo({e>EL_ zXkKz|(q2Qo%s0PO*g$yfEDFkKl${<|y|g!S1TC+X$EX3mb69d8&LGu4SzD9WiK>nE zJ6?;4pthD(v$Gnod8KT$7+V~eu0jWmSuLH&vK1#7I|bVv-4Fl<-||K| z(?J0)Ye>Uf4efW1#+%ti!qo8wn^@7AM+PCd^`sIo+n|MDx(GTPL8@sf+hB6oQ)=i8 z8$omA69QPbRNF6FkrR9{(S#~hkyvha?OI8&{|V-Cbh^bJ=W}NGoHcD+{DVH5-2lF4k$ozxs9-hkS@U_6hf$IVX0=ojI1!nu- z3V$~I{@5$ACu6tA24XeQ??j)7Y0=;4Bco?T%lyy#A3`L5qkjpq`u)Q98Q*n?-Jj)C zy|2K=ezA9xw@Cky{u%vhy;WbMy%N1dyIy@+y;r?dZBtKEqsq(CP~@t}n~@(xo`~ET z*%K)b|17U%J!ZFB1GcJwnQSu5~#z)ud9-@CbdYH zOMCT8A{*U;43f-HrlQ#*0;>dAni;gDs=a>A3(4^L|GW@kRQ)v z0l~njNT|uvKvrasfreVo16*ELU*o7SBZI6ogtLo*R<=4bgJd*I0EA5nHxq~yO6<#2 zP4qoGgTyqHNJk1X5|Csc3vf{escL9Ma{$Y23Q1?3nHl7*Arv;ac}fZt=4Oz;hVuS! 
zD7Pa+qFa(dLK_-!cPp}lo0vf!n_?u^u^3_HMbY873{uw=p^&dhs{qTfR%t}*$Ls;3 zye=9=-I!7@>n_P46S(!rWfn^xB{YP5o}m#lT(b}q1>pfCC#IZaR*3wp zG}6&HU(uZpFq4pU@GDWXB#p#0L@}$T0Hd{JgF}uQf^&B`k#MuqNXJ7H#s$vN;Hoqd z+z>JCQjbktN-lO5-@>BpekqV%oJIm1N>!In9<<4gZ;49fX{5%Xq>=e5abipOxoPCg zp?N1vbGcT+KZ>?1(#WAhOEe4C#@aBGp{y`7jl4QE^*48?Hup+-kfZPi(d_gza_dl1 zM-FBCDhaYMjr=-wn5;)n$RJu>mPW#zo%yPWFOY`FS4F)UX{6&>gnH)C3b`IrN|8Vn z=?gd|9C3rWmV@7kRx8rT(!+9>*-Z@=;e;7MR+yMZ9!pxg7I&fiT(mYf-NgsZDG=M` z>0b-jdFf8R3Fm>O=_-MX(;Yr#0fxc}OlwKP%t-I&`)(GDqoK*^^C=A{+j^|29}-9{ zNbljN+@L}XdPO>1zA3n+!{sGo)4P!x3zXs_F4E-k_mk4QDB~0I=GjhPnS6U(dMDpx z%ri+AOiQ0d85WU^EX%YgA;zS4@LXI>Jd>{%r?>OvdJNXPX9be;)Ah)`1%eN1=s_-D z{!Pc4%a=pv&*jU@-E;Z!GS^(byeLmDUtX9mmoG2P$>qz#*~RBAO4st^^(Z97#CTfN zDM_Enw`@?*#YXm&fSZ-B;U@*)axG;NY2*9GXdZV0$}1u|nVqiUw?pKCm4K7dr}Jq~GfEf} zW$CzdC7<@Du^Kti#mL|i4!Su=a5g+ug(#k#Udy`|#pO<2u54#v8W(A7MmtjMD9_^$ zAgfGBui>fD%p{g|>*kAYk1ty>|B~a%R?MqJOO-L%8KtMB&oL8WOEnE`gD(iw7N#3` zQshCmFxnJf7FFh@PvzHIxN}y_1dqhk9H+q&S91vW#MPqqs=SG-ImEFiuIBK^l($Rudygb_jpv4RN57i5i9C?i?o ze#-7ZFIQ!dit)JFU5?YJbgP-(-)xLs8KUJa+h8_#u5Q>M^BKzNe1wS8re{gSbRWM-u z+<3;g-RLw{8B<_!|83y;z-I$T0-b@ifsy_{_@DM4@%JG+|LoW|Vz|_s;cLTjNa@o&p)`XOkd&`EpG)Zqf$6Lacsfc6>jtSxuxS~vc$A)>Y~F%V zr!Sc&8k&~@o5xfTO+D}f5-@c6k`-?!fEQ)J`jK>hbbFrcU}^@;;9S%Zx>QO&ZO33n z25cdzDFlFNzIFi3$$$;CZUu1B2+IyjGut>3x!f+lNUDb=WLah_PurUXDF@YoLTP4; zYr9f$EZd!v+3Y}8e3+$0mZ0-8n>hKiP;e!GS*FevUx$8KHa$19k;_0?P1}^s5_VC> zAPVh7^-wZU%fM3C4~(Si_cd1zK*CK}g(LeWh{2K*_>rM4Pwx@_4+Sq3~S<$^XV zxazDW^x6!#*8LW=WD1X_z?Lnn%7C4v3n07fL5%J1S-i)o1e2@)#5{CwlkvCXKLYHSIa@MrcilGo%>>ak0TmsecnIbL@ z^6k}jUDTw^1d_9CXC&NMpm*k+;n>W0o}n7@lKuVo%sAeja5j)NMrX$2GzD)+$?E0( zQAL?CkRNgV7$q1tle>#P7G*~B<0{uw;9_3jcpZ;?d4a>Zn-_T5jKR$d9OyXA z3%tTHnioXt$825@I1g%G;6O(+FL0=1FfVYx<1;S^6b3ghaHwN5FL2nSn-@4-9`gb( zoSK19Pka%M`o4Z!rV}xHXJ;S`(@AL0hCz`eP%F}wD3|QS%qg6urJJR*3rjMq`Tj(O z<`DsvIxc%+*%o9Yvonx~skb_Jza?fAf=fl{J?`Vgsazw?6owr zSVA%`!9EHO@=wn!;@t}W4e8Y(8!OH%czS*e46&V@GNhicbm5qHv$H|_xL>Fk;WH|$-zazvBB-(2Lpc% 
z{514n=(s6MhLGCkr6pBFh#{ge8N`c>qbM0P#T-#kC`JmY!5bED@#;Hkiafg1xC z1v>QK>;D

YEsP9g_Yx11tT%@jvUo*nh5nt2WZV*gq!pqVF%h*L+{{eb{%EQRhqf z)&##9yffHu-08i+`=`)a9ODx+(Y=HalcmVeO%k`^B9*K z?fM*Jw^41Z@ZPOmV@xu%z@Kn4;S1V1c@z!owb!dmwfu1%KF3Ftdr&&hDivC^T-&u8 z0OYh>Nba4ksRm2MWf^viAWH)UX2*65kn#bRqDW|rxnPC~qui{#e~ezL%I3MMS@{o^ zW7}$!qD>ulV=4EsJRLR&yCT)il&btqD^^%vI5@0&A^5OTf;(5I05!D82D@-?E`|f-ozf^4~1GR8|SE^c$8&uDhuR z)2^Z)sd%nsnRU$ULhk>BQWuUc@p?W&1HV zs~57A&-~Uj<&!MeR9&7kUtJZr3%R*iL*|83(jmH89blu=6UUVRba5neWNcP5jJkB~ zBrc!8p(B#Xi^%Y1HO=~@OXM2K*@d|q+M0bgqpVeIRlH=fzDmvGW?s9AtVd|vXkn^_{8V^E%{ zXR%cHTyLm0uxxPDvcoICb_<)Pm_2zFck0gNdZ(%${oZ(H8n!NdU(2RcHfO4?VR>)1 z*lxg5uI4xl;=iOuSW>8?c`d17cISk!HY+c(!QPT+P1basfuz^*)_MwrXQNZQeG2Xp zP*F&C2=;G@_q2cnm{7d5o=-4a5;;5Dte%QYu7+yvA4oj676fYVj)Mg8+)M4>waJUS z?DGH||6THH7~;IQYiSMJ zO4y3Xa%59u{Ma(DWrMix( z;&}uRb+zLVwA~D-+{v~QaXgkw8;OXWIilRb_D?I!<%n`SdtYs+Z zjvh>GB&=(T_a<4KW;t>{^UqM9nW?W=Rl1%7uGOGhmC)93>XG+O?M{|QQ+82%7N4nS z?Vx~>?76sOu3pdgEOLH9i}|9{YqzpyNJrd>%QIS$RF;QkG*)h_E3I|*S$gIqeFZg} zVhdnvZ@i~pgaegsV0lO#Wi;&TLv=`C1qWQXf5lnuO&UFnu{xerFiYrygWf(IWwWbF z^*Ye17is*#n&5Uf;g=8cwc=Lh?gY7G%~n4*SKsOi*8=yno_-tTV=MIwj%7D8ZSU58 zblNz5fr4S_h$Chj`%ep^bMQ@z#^`18H9i>5^qs8>qxv+wtgc*FSB=L75q%0CoA$@S zbjK?qmnc3DzsE#QtR5L7c0{?0<@u9Emej<<#EvK*V+r~!0B5Aw5#^&SVO?&yGg$11 z@)3UYV}E0t7A}eUG^?3sJC|X2A5uTXRv4Wkvg*T(BeO9LHt>fS!lEoYHt<90gX~a} z2oJ{dvW~&om_b9xil%2Rw-=P3vCA%Gb6H*x%9AV^uQ@t!WKh1qT0nLJakaGaI0q4L z<|+5{{lon}R^=YX>&Xs9_#9pFeMGsN9Z#|_H(KzB@^N-rND%S;l9N{yBp*jHUn+QN z%D3(!f5(Nix$BO7pkt4U{!sZo<71AxCq&PI)}1Ll)v@f77_mytB~pPaHP@S5Sc7g*Srg$Ubp#UpZ}7-Xd zTb0{l)0Gj{>a9I1XTCs+d(%VP9=|(SxgRfnYSkz`GqzEQSlw}OUGdN1?2|ML$O#{^gex>9?|}xeH(Y; z-KJflb!t1ccWEWQJABvsF7b8x&h@SLmH9?{|EMY6SG_MFQaGkBU--4) zw}M~s?+ky@_g46Uz??uNbZh8Kp?8PQ5BowpLRF#SP|W|wz%BkCgcRH}@VMt2`tsmU zL%$4OA8bWFz~W%cKgVAb^!k??e?)eHPa9Y3y8`{buLeE`B3x^NPW{ABS$Mh(2F}*eDECq1k)>&AJc@?qZxp_2fd?6Wb}iBeC`M46UX}{!{S>HY z(wg6L4)wxql&VT7PHrVcQwI}PxIxm9{^A2fpkzC))Q;N{FT9^3z1^jy5tvYKq*9wl)(td!q1%MKWLd_pCm_Vb!)J;G6*T>OiQ)jie6%(fT@VP84TG1!yUU11Nf=&w=z6UV{9xAx 
zyoYrRk%Mm11K6d6UA8`KfVKjsGAu(enE5qfY1$~IuMso8#QT_URC`tl_Y90_Oef9*L_KaRh-au>abd~2BDE=Qyx#ab7@<(;Vzxu&Y|RFOmEdd zRI;A~g#RuY0&_(xFWVo!M7w}lvWKd|L`iDJcIX#acIPK30mfPNGKR!b2c1Dx&DnD{ z#lIhDLUzC;m)Jf`6HPcLs70YKu(*5k>UD!^n!hK`)N0kR1{vYR~lEP{*)!EmC7BuR9-)Fxo&9Ksfe8C{rg; zW~J?TgRGT%*+uSISF%X!Qp-5`yzX$-@wf{l-@Sw2E^>6p|!so!F!u^d(t2Wk+PVq)QZox-3c z!lb}*s;<;$X@K)7w$x{6vh{X@Lg>`g1z>SuN~=#$M7N8alO@2YkI}fhz&W%*>gOnJ zhwZZZ!<1u4&8_dCXmTOHweRDNcA ztXrpifgUaC`*Bt($;hQ4y78`|J#g+5BBEEa>^nP&}S=&`8V;_rM z6x$K2K-T|XMxTt{6@72CJGuo@x-ar_1n37Nj!P+&)YDDeJ3 zZ=fa+@xSH&w*L`;7NmZ=-jC{!jgRSn_Yw&(qiGBed7G zr?l&}1})(EvFAqc2w(JU_mrxCKB=PUzhUE^_f!nPGrSh=lX>FVM21O3u03{V>85AC zHgABu!%crnItpEmnGwRlrD6ci;nce0{G#K-d926^pm9X0qzX8r<_^G}TRd6QZ~v?mg5(E$A1$cfGr@0&VOZScurTw^*U|< zVFg9hW>ZB=PfC^etQtTZ0gJ{Imm0}%3_fA^w&PYel0I^Ko;!fB0*crXA1p&Ga$%

JV=gpFM5>aRpX>6jc^189)d@J*t{sj&fbO&39RA`~U(8C@N0qCvOrR z%^N@<0fndyqO81G08bx4Tmg+&j^@$^f)E1&$@T3Kt&|NQ)__*ipvD>jJ$3*w2$W!f ztGaF!?~4Wye^88w1xxR>Sd@()7)5ao0iV!;$&!f!Bl+fP(}!Its!bah!G|SVIVK?{ z4#aqMo2E<_P8f)yS9}*gw8;lev|Kb0F;57ohLmL!2O!1Les9YuCktl|AP#``d$*fT zBvjErkT*~K16SCM5nYcPF!)U3WvVQgJ`ms|m7~gQ5Ov27z`K7EdKY@VW>GSAz{efz zu`cXNJ_%4f;N>3Ixf(wSGjTvSscEVIWZ|>{jSoh)E>J>D8SwDfV5eeGmd_nf`HIZ1 zC6utU2NcfJSV~;lI4sXO@!`dpOZazLfYOIfz)a3u%p;5(whr0X)XYU@&}Nobp{PGC zb0Poq9RETAF)4ExfjJl$dje3|!K}<7&g!@(v6NtwGX1;}dqzuHIxEx18<9b}a5%&% z!LN*!V8xk(oE|nX+(1Dxn}nH?Ilu>sdEMvIbW*05BZAx(@eUBpQM)LU;*+~I!IGca zTQ15Z%`$lxkL-1BriTx|%}**nqlCAbcQlPl9Z@EltTJ z__xxm;6RSA8K3FmODbp9OvrTd?L*$|BYPg7>EM-wN2Dy7mD$fnsp$ZQrm4BTgOuwz zZBNad&!?O%qS=AgtV}y^$EJ1$Q(&sh_lS8nE7O((X6er*SV?9d7bff)b4!^n;bvv_ z^5*PtQnN0>re@;iX5&<|OMoevR!(%M&Rv#I$h2_MvI$5ZE{lTETa}H09s`#5&mxBZ zapNZAj!0kRrO01n+oG>VAB(;>wlX#?`j^Ok;oGC%j9eDIGxA#W{OHEWwn$lIMEHNA zq43Yb&xY?s?!B(?w(zp>tnjFCAaGsi_n{w$J|DUX83n3CQ$xYv--53rf8bYeU*CHm2o2-YX5;(3(vr3fB{{XepYg&bvj06d^P!Q9?modCRNFz;l$(5xa*q1a>h zE7`3!vVKgc$l0UynEnL#`tCMp9gGQ!px`NZ=YN#+E=Z1xkCOoT;j+5q93)s?b=s9ztc&orFw4{UDa zsNK}t41KJ^-6=Ssq=&?<$`R%zEexK?pz=Cf!tRoi+RmZ{Y=zQ$XSaGzc14@)q_**) zwK-qt>I6d{t&(qtrq9%?)j=IGCMxgdLuhx!7?EJ%C(^gUbjBFb#iHKq_9-5tGkK!E z470%QPa&#w^2uN?axrPC-(qXj(L%20#yNbaFxPd%In4Xs#gUaa_C2_P z=qo(#o*v4t)F)V}U6i$nyy`wS_V!Xs&{g+FA6BA>vXyQaqsPR?DgpA+V-q$LcEp=89WUEja<#qc`&nF> zi*8SS4@VZh5g?TRuUP3*VvohTqJN6^MVCPKKQH`X zcwXrC(Bk0df;)}x89M{t2%P1A)xY2O6JM8ar1uuY5r04*tzE16Jy&=_>NQX__MxHU z_>&G52Nl($Wc-NR@F-VRR5alKTevI7^fTx_-RX1sRxY30P}knllSn4^rlz0Op6qSv zm|maiZEa69EMB;1RYNs&G)O$#o{Trt)t}yrTP7N+dV45lwc+&kE~{=uRXqaCx+~1W zv*3A0{+0;D+ZRtI^|U^nVUzCv*I-kJ;5FRZR&jb94y|#C#)Z>37VZBUEKHgRA{?H| zd*7G0_lDL)swEi>C0-_n*AZs}mjD*?6BpF1eJQHz_Hf)+Zlr0=(mo--Zy`_Re@sD`sc= z-u8G;P1oKyrfQ2tg|uf9N2B-Tks*r)Zb(c#=}2ITRMd2KCwfv_a5qPLvJESrg;0vg zcwrI8A^%c%$K$aszPG8jgHq#n_olWqrP{3NaM1+b`$?RQX782BWW2MvqaVYNkhAg7 zc-}pZf;@ZUJN|eY0KWTFv76at-+RXKzDrL;-?S4@PhwOPV|nA7PekKpqgh+b&i1a> 
z#6jHa(v#@M#%_#ZJ!Z5M&|`(!_PTg-e=5d7cWUgR=L3ZD1HOf0ro;*TieZK8P<=@{J;V3LPE)*oI zR{=Jn3Y-xQ^69-@tw_*evTTCDePP}~({Xix_Kuxdq0Y@+x(3p(=y`vL_q_h(_H0h- zK#=v8QSFfT5!27@N+bMcM?59zJsg&VV&bw3-{gQZKWBKof*c^0CA)R8wKq8ODYyM z*y1Xzi*Z{!I!x#bE4%t}iLmFO#yek+6tZup^L6p&-hIc|HG>r$*LZlxIHHc5cAMe{ zQ&^)Age%TOrkHA#jlv~wqovc4tf!^FswtW3h?D$3QTehG>y5q+tNsI#h2f8c=i<)4 zVDQtynef`58o0s#3;#CX$Gm^_Zq~2Ueu1dVE$ZFs3{)B#f6h6D6Sd4f)pyoxTDN&; zy?u#RZ*P$2RKtO#kYAT9Tml_}wSwy7JqOxb;&tTiEP2+nXEksCgztM|2|G0v?numD z#gNFXv47;26R0k+sNfU`%pI?CFBB~{Qtga4n*tmwv8c6fB}epxYf>bF4O|aos(}u$ z)50X$^fQ{85nDK&SW){HZm&^MNjJb%aL5X?A}C2rv-KIZE7)fYoHX^6{2Z}w3vq@Y zBH8pl<}a{Sx4x!yIUf|nGo9E|NPdOepqzocNcK_K3hf=-v)JHNY=|mh3T4Q z9Mk-g_lZ6)2dDcWIJ;7M&Qgwl<%y=W*eoQ)k8R#?=E)V+(j_N-Qdr26UXbZ$E8N&k z;`eiktw~X~m}8os<^I=BiovlRv4~?k@c9Kf`#oC&k2Z5EkRTTjnj$Wf{~ z(I~ymS!v?uIxT|>IIj7{_~RVxw;}OJ!cC~)$P}Mw!pX66+c>w!n>uNKw4Jx&m0T2U za&a>qD&@2}X{?xYU`+PcoSWH}!V70}GUT^QoKP}2d2u8tD&e#! z{TIy>(@<&Kh=l19O7Ql5UE5*Ml2ar-aTdp}q+r;Y8euo&jF}v{{FZ0rdT&Zr1H5m@j4mcQEfjPDfh)82ahclrVC4ec*rjr`2SaiHP;=|Hf;>Q*p$ z&D(g#%iiht*KFL_-nGAijXCMYZTj)Lr$BzT>>W=i(fPKx$RlMf zE4_|)pI_;mfp>5c=kexd%wMGT+!i;z z&#YC{v;L%yVikNjld#Q6k{&pN_nF^7`i_?lEty0zUQc%u$bQqFYTj{vv(7v2*t9{$ zrMHCX0w|HVcpb-~^W?2qfrG1~S$`Gp|0M1hTojAz~^e9<1c(RR6U zN%9^tkMec$@+Oz>PrZwyaI*Osr{`nm8s2N|$(xWg10Y zw)npGB<$Cf``~)LvpvsGg>L(Ya-o1IDe=k%sz zd&@RTA6rSmF2~eO{_50e*<{=c@;9|C?%L<^bb!{BPf~05t7A8-Dv_ohO1wdvAOORQ zinmQe#gtlg5wExofkLTxmF4AA+0|Mb@2AL(LE9Uo9_!|PQ_xL>ccZF$6?}VA@%nhj zUP2_^YU^;&ZU(7G-OQVdMR&Jal^zMAqMLQqXH@RkxV^EyX8oqh?K`$rH&$-kyuN1J z_6>D|cO&o69;5}2a&>7j`dgu>yVIr44$(_v<)+GwySLX=)i>_kysfr=OJ!AcVFx_aAFxtmaw8~ z3i-vPt?cNihffJPR=T`GwU>-?961K?q!tB?Stsie;CCCDhK#k$Z=0hP7>=6 z2RZ_4WN$7-XeMry!(mGFTGQ2r^vUaxtcS&1Wjz9Xov{S|PX>m7kVN^k;0fe*I@I)q~sQs=uFU+Kc$oh{Y7wP5Ob)yvpdz# z{z5bN8e8EvU~JyZus5ww8|jvS4NXbhl5~1J-c{WfZ|Q~4`EHB?k8;ZofF?g`l*8^X zMtT#H8DS~6w$qGk>d1No_y>(;FuluJ%=@zUV|H(Ug=#k$B`}g3GiNfyx8ZKi1966y z?I*ylHm1R1E$CR;+R9+WzW}(xsDPcB&ly#%K|1f&I7Q`DVhJ6HXFnUMON^;71oH+} zRSbyu*GOGxl*6Kn_AoB0YWs4C117NX< 
zr7>v|s6yBwf|X_s`^QLi8dG6M%-`90##|Ws_?%MJb?r#d4dJyP(T+X+&ZktPe=@9Y z`8wKdOoiDie@B~)X)r}$-7zAON|W^=f28XClM$$yule=HR0K}uZ+;b8wwk9MhBYsL zq*nSDBI?jsuz-;%-im8mFX*Ma_8L2zQiu=Ceqp5Md*>lk5Ts!>s@mc$`)l@^%iQ^7 zq-J>MBdm`v09D2HFz`QXZ9IUx9}obT^=PD~c*i07&KNtEwcL(?Ec(w#P4rGiK%5|f znK#1x$4HI$&OxXe-%3;!dPz$3$+vySgOM8Toq(`1zW7Z@24SRP-U*2Kaw`dWOA(J{ zl#~D`kIL)}rT}ICZS*Q1Hcmrqm9cg$LtY5|_1*Cn?u3msPgKuaVD#=V<|9^05K98K z^t5-UNMl3S7RBrNqtRPpEJQey7&sgPyreQ&&n{NUd13TUH8vs+$*8S$U?{~w9;2gyD-oo^tExD&`Pfw@y8CaZ~WUny~(IH}0ih7kB?KSw5k=$!6Mi2+*D6BpDmppxcSHm9M3zT%Wc$Ab(*GIZitspi2D}O# zz(b+;g;L1(zbG^^_~qc0!9Brp<1fayjeCtNj3(nWqbTr`*f#?o4vfdWewDtLeRulm zy+86^ijIc=3{zm30>c#ex1+#{Gqw4W!E8`cT;>p)mwq7W@IhoqZuEQ9}f1$QAtsg>%1=m)t z?FVt+iMDd(K{W8T6ptkbz&n{r4RDvlI1%4y`)M3j9yO-*3Koo%W1P_z{{rA)qa;Yq1(rQQOFvPzAxWSm!!`cBa=TW!Rt|#7?aI*b8tX$`xeF31D zw5aBn77y*x0 ze$+wMqjC65V`V3xnP6pM1Y)HUDJWv?bsR5^!!wPI9jJL?`1Qk6jH&z4T0Wlou-_;- zALS(_jHj;e?b=+A5g_#JzYqKT6Wak+RK&!nZJpbD;&J}Bap;hDdK=1^s#8_#_Crk! zl}=LrFb?&2C+q_R(=-zVirHVrq4T{H_F5%XZHQE5e;J1wy%Xa0lFbLZs@m|EacG-& zLaSA>J<;3J#{M!6ReQ&^pu|*okj{txGY*~N9oKBVuY*31{xc3O^p0z?-jn1={~3o$ zytB^3d$w3e)7IS&y&cSLa8QyzjYAP{X(OPHrpoA3?l#t(3&2xOVL~>vb^8#cU@n}F zep-avw$$Z$ZS*Nu8WrcDI@3K{Q;9So_E{vJjlNTjg$;l=RZuyKi<}om-%?}s9@JR1 zij6KeXaOazn1im8`f-12^pzW{&$jAvD^+$~_qRsh7-QpZ)IBj%y>Fy3br)L8H`V*R zM#)Z;n^PS#qqh%;(zn@v_v!vpxX&JSSPv>kjHPgR&E9mNIK^G09rmHIy)q7-Z!Ct( z>TwDF2hTI+!@pE)RAR5;ds)`jmNX8|G3LW5R6u9#V_A>J!D+@uxObkQF*rEMm+YNjy9b*8SlN*V-$T!AI`eBVVRq28mSP^-PxNWTT%s5CN#{LF+UQV zjz2RN^axk6)eSSMRyT+=4m?d?gYZRZ=!RZB*4-0)Kf?7LZKxRg>icO@~!I9eDGDHw!>Qr_CQfommf;(Fmw17NML1kB0 z0=J1l36>l>NRi=dNFr9qne7<1YSQDvStQ=d{6vP%(7^!O2^--7cvlY@Vv#W(#{%5G z!B!L$ExIzf7nTq#x5DB(Cf7;w#N~XHqW=5v(BZw;*ly z5W$8yk8H=p9x8*j9d12LO6g%(Z6rrm@XSMmTs*dAXiRTLpCPs22qVgYR5g+VAV zR|hNGOySrxhD;n6f%fdT`P>(Hu52l+_JC+IB-k=SWLq4UDJI{Jt{w7!Sr@K!J0Zq< zLIrV@y$D?BFYxRTdEF7iRp@=SKGCutmkbt|AV}DcTtl!q7aord@g@WgTb>?;#|Sot zzCzDf43L;9LuH@@J_S5^dOOMgr=W{Rpl}uo!Yw2!o15v93hwq(xG{kzm+7Kafdot4 zqz5SU`mv!jBw4s4dT|Cv) 
zN}eu5Vp0zYZr3)4$SG_{7+@%a8M$#J!G{xPQ_$hEP~pFL+OMI36ZcanBzgE1%*Ht~R5b*MR=`cj9JZmc+D&A5PN71wgJRCqAPq3W z%H(7|o`Hh=KmWvf@aMzId_Oc4NcoU76Q+ye?ZRDEg_~Ydty|U6 z)Q-EbhXN)&{oSd=zMiJ;w*HFBj(rKJo!f>WYE{WXn$@g|cF%nU^-ywRRJb8>;xQzITn9?Yio#A|hD?FkMTP^?!uNJG^$ZOcuI+8` zrekM#Ra;Y6SG?nG9Y~P>l@nzBw-74eQj$cWJEA(6FJ=KZUyCICayW_=rK@&_5K2Q~ ztT=Io#>OWnkhspT;69SJNyLyF8k2{ivL%ryB->}B4DO*Vq>TfSe6S#xc}ohor0i&H zOtch~DL)I>AjG%#b{8~HL18%;>Ydzbccj`o3hNUmZUQab+$^mAih3`w`#+(;4a5bv zgAyuE?P6i)#jbUU7O3YPY4!GA*s6EQ#qJ=hpk=o*d!avc^91)V467D_prvWd|e zZ|>b!W+hBx&#Su=JuID?^?ol*`!E*m>sq~R(PHa$qC170ONZj+wpYEW)rscw8yZ;N zlALU5Ha(>2+Ld7JkJSLbW^Q%7FK)e~ODljq-BDrdsyDTFWuiCL-J7x?TXDBnPbcoU zp=2($59?^^+6Tdk0*zO%tXL>3K)nP-d}l>VQob4ld%Crn62l=c+kDkG=mZH@{CHxZb24p#CO&mmCJeYA)6mQkj(BPIC zx}gErD!sk&p202ysitwYNDc}O=lmNy^{0ba!#V%Dv*uFQaL&Ks zoPSXIJLd&$yV?9`k6>L+e!^aRj=kZWe_2B#^ZX?dDkE|u4yhDmlw@Q$=O1K|>{TU) z3oIaA7|F874Uu(5#>nBEe>p<2ncT@*X~~M=oPW7fyKt&Lk#Vo^7z_zFvqI(kvrh@g z{|k8LeP@Qf;hcY#I>1SpAz`LQa;4#%f2kJxOy;vVi!invYK7Z$vTz#C`8QNX#fHal z&cAq`4u*!q=y1-zEE#_|=O1%UDxgw>5{8j~ z)Nsx}y8Dc%F`V-+pIV2gGo15}OF2Y~x1aN`%a-pC^8doR_LiPRGO-u4@Axn5BUEI( zT;XIajs_2P8qWD=ZYFR)9V)5hhI9U9TNr4@Vt_=By`eHt0-pjd<-#X-kohHsbN=xI z$Ch?$IOm_uQ_Mz};hcYN3Z!fT4d?tj3YCU){^i)>ZF6Wi=U=YL!=8R{IOkvP>@lka4pb9E Date: Fri, 15 Jan 2021 17:23:13 -0800 Subject: [PATCH 023/175] Revert "adding nil check and some fixes" This reverts commit c0e0424e384a0ebf8dec0453ca3ebca80b166b4e. 
--- .vs/Docker-Provider/v16/.suo | Bin 32768 -> 0 bytes .vs/VSWorkspaceState.json | 6 ------ .vs/slnx.sqlite | Bin 200704 -> 0 bytes .../scripts/tomlparser-prom-customconfig.rb | 4 ++-- kubernetes/linux/main.sh | 9 --------- 5 files changed, 2 insertions(+), 17 deletions(-) delete mode 100644 .vs/Docker-Provider/v16/.suo delete mode 100644 .vs/VSWorkspaceState.json delete mode 100644 .vs/slnx.sqlite diff --git a/.vs/Docker-Provider/v16/.suo b/.vs/Docker-Provider/v16/.suo deleted file mode 100644 index 2df1c2fce1b495b95b9383d553ba04b50c2669c5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 32768 zcmeHQ3y>Ved7cvzvIJNN%=>jZBq0m!-o78lA#}QjETlt+PDrv?5VO0pcdOgSX?IW3 zNq1O0lqf{lfO*I`P{yuML>N^NP}q=AO3ERa5bUy5E}Oyxf};>j+*k*DC?@0Sf@^8dm`p0lo-W47eJw1YiMJ0nGq% z#SU-)oB+~07gx%x+1kAqSK?r}7I7b7DWKJmE}niie``7Z8}I8`@csA2_*WYlZBUQ# z{|NA&8vkGdg5K+mazK{>e5Q~8M&zYTAm7df%m6Uo99;GB&oNi&_~&??%<)f}&p?^H z<_C`d>j3)rH`X&h?`P%y4)Ehnz+Hg50eb*{1R!q+e}ecufIrpJ?#1=1TKqm-?*}}f z-9L!yLx6|1`~BMW6rWGr-ZYi%f2!S2+;-O~GpB!Yb|@UEe^Ljej>a+z=gLjx{Bc-I z(&vBb{e#fMlduOAKvoOSeK-GZeiiyZ^+hxKcLywjR6fGId*D~~f7&!=>pu^9-=(69 zupBi`N&`|HHb)9qv-K8lQ>UKR(107x!DjIfA}@7o1~cvNQMGK5KmQl)pEK9yKQDCu zbtJy7Jurjct6Cg|N)q-D?E=~vX6sM=Z+qe5s!AnZJycb|1Ygo|^IB<93E00oq=0Js zrd7)~hEd277?48fnY79SNDssEl3^VyNXZ}`#2dW2VU6ke_B?&wKm9cH>#uYca(eBmowpA33$NIdhqtD-+%*t&V(z@gNh zlC}5Ep4+s(5dZhp zng4cJ@!1~qlUd2s6Vpv+m_}_~6^zl~-e!&+?NEz}WB=w`5WmV|Ev?=KKV(6CEg42!T>f0z$5>OK> zEB|ul1W{_~UzYef9ry_A)YcvVcSR5AMXhLqWl%qIzchCG?58!*-$p;7+4fH&ug-tU zd-l5&I4YyQ*;?zCp#n-WYOAx&h5nhPss;ELX}Hbe4pzWJ z-X>frKbWr+(9SFN+=*u{06i6Y9)El02S>GfQ^7J12zF}1_S`N0B!|r2HXZ< zs|Eof0O?k6jR5)p{eUQ73t#{c1H=IdfL=%Al@f6Xunj=_b_ZY>uoJKgFaj6_+z$8( zU^l?b{6CfUKS-Ex0NcjGgbS;pfHCBVRPp+RUzrI0o6+OLs*I!5zeQ>soBz2ZKs;d@ zt#V2sp_6Ax_EjN&{ur-wEHjwvf3z412l}5Vr_KhD4~28(rtZb<8(NYX{BBi>w>sZtAkR4Giuv$H zNEFV_QT(RIw1g}zX#WQ4bBzGA{HM&)OEAv2-Jpb80KH5p*u_1-n!q2m&@QCN>MWf* z3&Z{z@^h|aD20Cm zaIh8Dz#8V2e8>-uD(X*qmuUr+!rzXzrzMaC-&cVDN!$-&_A|Bv_3wEo>vDkE`qTAo 
zhW|$V>wrCp8ITfaQ02U+0Q5g~U7BT@#XsMGe=U0bI?Vj^IB@l081Wc-pwP<&j#9sk zsQu33Fq^TR+=gdAnRHOgVUvaQOltn*-(axfEMW1gJ zJfQ!fkjytyu>|#}@0~m~TYoR|KB*P52HMXSaJvt<1n#Vkzh2#~qh%~|X{w_Cdr;Oi zt-N;Bo_1XtS{r5D_Hk4cL98Wn{iz9Ys`F;+Z+3d|5yI1#y`jxnBi6+t+IT2Et zmbP{#GO6L_rL8Ly+vHSKPGovg$!*cFlDa{TD@bqdQBvt>I-`VF%jwKbX(g<_v9vHr zt!=54JiI2dN*P`+$A*+Ok#r^%P4vl$;dR4Zs=smxGjb>i9?PMO-V&~6o42`ptj_DB=^;7R zn;8m6lh(GKLn+0oBk7hCa-Wj2_Q;_DM64akU@SQtR}z_Z$z&`jr<&M8hT7HJ|LrsQ zza1N%yK(fjt-IHzU!VKLInRD4x$4Qs?!NTszb`rWz*#eo9iZu!)7RPG>rKDkK>x2E z-@NyU^Ji?j;wL}d{mw@lH?927>1TXz;Dt|Y^U@FOIsDuQh3IcBJX(tM`$_E_Y9BNX zHuQ|rqp`pkJ|V|%a{)pNiyu`F(Wb&8IZ7u}kD18(%e42W<`c0Ieh`+ctw@t#Gj>Ma2eChhKbT|7EiH! zS-OkmD4(v+<;I*7Ni&u+)$s;H%ToT(CPI& zP4)0zqxwj9!RzUOMct{MZRo`A9t(V+Jm=s<%#Obf)XjoieAb2->)HlrhbP2_H~H6& z+@qNN!f1zFytG&8izHSC`d*6#`oU>GO_!c-=xDLwZ&#b6tJCZ1v|E;YT~3SL-eI$} zb=uo3Hd~vm-EZsa>S}k6jtX>SQY&rGEQDLLIA9}cWZC>^QjHl@+h zIHolPI+CFwDneSWs3oqX`vZfiWIV2577V4WnH`ytkzKy!PM-sR7LUW`AbPvS=kwZt z-e&9adEH)rhhy}+bs;u1l!{qW%GMzzovDP{Kd%e5gUN8(x+R@VfT@04n`gPR&1-45 zw>d2?o6T=&bNGCgPDfXl&)@0ybhX(=57dRsU@|!n92$nwTnViZBkI08_Uq`fldA8Y zE-i2FUDslF_&D0^c2C&h@H;Gu(;K$9?4htlwt2l4B`AmFkSpX1g>ih+$Oy_dL^jrOGEG~b&zpF1yyk;>JsEoxZ_>{3WVf9876 zQmkyut`mja{gZ-KVZpB=ZN(jF&w$R1x?|_Wpptu3!=UR#e)|YB@5U0-*QdU?D|8g^^b1!vpBHwI|RnH$=p@n zEJ<8{TB6}Li=XTD>$D>B>tA^PvyqZ4ZpK`|e+90@&5--F@ZDl0e@=Upk6&OCtF9b* zZIA_nkT+bF<4!bk#u&K&n0PNkqFMZ3L|)x*L{I5HO~y>c8H}_lC%ID6(SpHi_Rb>Jr2+spL~qPWddp`ta?V(nsz&up7_^8m*v$ds#Q*8b>d) zf~z+0kQ|TTYDWrdAVy_v`RG7esit+6S zl^h*GRnt_^FNLh3@0R0S=!2x#ft(?5*Nzy+m%a~}`@!lhBaJ8L#NON5_DnDM;{qSa zqw#HCY=m=NsMc+6!w9F}oE$5Dl9ILXsnSzHtwL(ejI$86oWHDTZ*_ySKDF%5xx3kPVlfoJ9V1$7b5_&g`$Mt(70 zWjV~5Q85mb1SPl=@UEBI?}Ay@V;u zufhtEp@GxRh=8Azu#|3;xH@keL36!(sBu4$1S)GiTj{!}ke;Pyt03Cl13tS|Ugp|N zv~l;>bZogm3)iX1!E*KpcdS2Lmb$6&{gw6&i4#h-wLz#!(OMD~WjPz<0`RW35{O@6 zq|Ef-Z~S~yOajs2Tkp|q{5ppCMqzC=I6GQa%sAXhseKdA8Q8Xdn@GA z?+n+vPm%ZRRi!;1A+%2j?L?i4o=@Xg zy%cu$-Z7rC%+KX)mHDNUMbp;tzAy<<%y}<>b-7A z33`6TH?s0AZx?#0@F&yX#rc9Y5GT0|_Zm*Dc`75(rVW#}x{&AlBPoO^q 
za5tdN9i{v!V*cQ%(t6Jy#;>V~@50wsqEK6Ms(;j)%1J!#dmpU0)hGwVS>bZlLB7Ng z@&P-S@kQtQ?0Xu&Uq_q{n0gyYUqR(}B5JEsP}37$N%qcCHaImyJ4T!rkgNaC0@byZ zLmu>GYN_0ZFkTK7^SPV_I%_-Pyr7eFBzdflIC{>*>Kr2@mFrRZJ_MdLs;$0H4MRwD zwy06-E~f8uchYSYN`>F1r(O3NWJqGbDIPM)KoyB$y=x>NGGJw1?A%Fkb zNx1)SZ10`f{corC|7G|8t${XC?*AWA!WIks{t;Jpxz2Bv|MX}a1~8bz?+}>_{1QL$ z>Y+fEaY@`Udss^~hu=A_3_XGP=`W;*jx^}$rLLSM4{I6B;dd1+uA2H&2h#CR#`j-n zCCvwzt^cX-zhwJKzWlBA_g^rPTz&72fBlc|y!eNY{Kp~>{N~`Sf7@{Jf4yZ7K7H)S z;^WtS`p<0H91+WAj-?fTWw5>F(3uZ?`;MNj%YNP18tdD!^JRVrCJ-Eo#=?PEG%>Uz zpnf?f7DFn&Y7z-#aJ;Y&r*JL7XvX5h=UWnykzG7m%b=dI&HE8gY_`!6+Y;MImpq0! zup*k_q22){Wzj!DV;Zx+I?Re%=Yp8UCt(7Ev7tUxInb93q*EbWtnp;nYz-YAy^8#F z;{u*(M$IBPaBPw#5^hZ!F&+COs7-n%m|zW>Pc-~Ic4 z{i{#TdGz4mz5IOTxDLg%YQlpJ^|EAn`lr|lk6S-Hw({*&N50eX@w>m+zN&Tp>Ar9J z+GcJ~e)`nCv0ILO>s8j2!^jw_sXe}3TG@viXd;OZ+V;W0gTn;#$NdMAG|o4zhs`2< ziu^Sb^zm;duQ88$Vn5m(ABtt7mI#D`@u7+M-(m8lRW}PKrG!K(I6BQrBzJXkVG=NV z>Vvs}c;!)>s%6jEkw4m-yB-l_wnOL-#GWfBX2qe9*bP` zUi|l0-nq$}Jl4a$Y@9tAFVw%wrR}X#{l|O4{oQvxbNKMjKWW+Rzy5^9A+Ehni8KcDFQ|Nnk_fKUJc diff --git a/.vs/VSWorkspaceState.json b/.vs/VSWorkspaceState.json deleted file mode 100644 index 6b6114114..000000000 --- a/.vs/VSWorkspaceState.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "ExpandedNodes": [ - "" - ], - "PreviewInSolutionExplorer": false -} \ No newline at end of file diff --git a/.vs/slnx.sqlite b/.vs/slnx.sqlite deleted file mode 100644 index 0464e6e52586d2e40cc7da2eebeb8506e2c2038b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 200704 zcmeFa31AyX)i}PZU9Gf|cGo!^J8>KzIUL8465n>xG;tg!aec-|(lmBmSz6nwWl7PI z#BI`|^gd|m!BGk=EzkofZQ327EQV`-Gb;f3OT$&RHOfLh%0` z3D}X<ma0`WjNG(mrcrgsN|dbfX~J0FqOIn>oL z(pA~rzpLxq%0p+A$GGCCguj9QT>A0K^@lQx9j{FmE4GqOU~^A?Hv7GEdVc2A(=eA# zc^a0GaJ;f}ptl#_wSSyCRTgL?L>XFn^9nURj2y)^5w|^YuJJPkMYp8N?sJpLY=wM|}*TKq;(UF1fe#o}3tA7Nzf`6mEy&G9BgVFGj zfM1WAQyVmt1S?^vYj@XBSAS>MaOEK*MOP>uh|iC&PX$00wdo{KZ=-{|P!J%7&W>GO z8Bx2sdb`q51~XKE{rfwHI`?)At#4>`ocg?>kRI*y`eyWrT-IqX 
z%O-SX_Ok5g1@kfWx$HF1_Hs)fuf1&4L3-Sk?0!8OgN~Ln@}K1FwvyUf{<5v49U7U@>@%O_%r=-A zX=;H+gt?N=pykn)kz$y>PI{Q#eA-dgC^z-0XObAJY>UlS(&aX)EW06340@w_C_aJ1 zc6OXSZ~bLcnR75zWi}Zq4Ebi~jWC@N7P1pyTTbqjY$dZW^3A75K2tAbMdzk8jejO` z8+#w70%a#VO>U=ow^O`CvDK^=Cq!Ps72|KmuZWYySjFFjP{OH`VvvnW8=m!4V^qJ*h=cl_+$m?Cc}C>9tcg7 zKF#!P8X{)4r(8PHjEK5V6D-teYX>b-NnIU(khGlGVbIa^iI{#kp@%?g%X}0v+D%sE z`Vch!jgYc7xreuvl$G%(b&@CJjUh`$9*0<{smZaBe^^V~?+qrP9)gDo=RlRCAJQV&45-3QZAc2Ae3KA$tpdf*Q1PT%;NT48rzg+^C9gwWt)~f_u zKX#{geuk-qIJTMnxe;f9R9wSkQ&YQts>R(hS?_n(*ZbSu?M=E5$^Gq(&28;oh-=h! z-{zJEpS!-PrP1Bg+P2y4ZS?uwdP{S?zPY`*wMB>E$;Jj>OIxeEp{?HMZt}JG-QEUo zy}Qxh*4i{V0HMc3+Fz+wR}& zZkW`k8e7`^ZIe^2Zhw12eS@#r?{05x_5zU(M6Jz@?uM!6wwB4M$*Ik)Ks4oRZEmk` zcenYyz|&-t&pqjHYIfJRH2WJSeN!z{E%k1Fs=ZC0Y~SpjoNAv0Iq6OA$;svh_vW_E zn;SN7_O*LkTHI3&jsD5z$p&|;e{&lZrnhx-I}_$)i{8}eb5FJU^_F_Q0SfW!?k2s# z>-N?+Lg@_+^^^6j?eLcRGONgK(ny_x^$)j0I>U)jyrD)EId@}cRQJYp|3Gv!;>RDG z>o?UmZED!uP+wW!wxy|gOMT19Ho?kuUcL=dU#R=yo#D__U^)>cn-ardXbAZ9i_xG_ z%?NfT0zrQURjhhTAgX#2Q;dxr1-nD|Xl!gO5CY3_FsMhz#(a7- z4m(1y1)@*uuvg)Yhat2x9lFN|_4s0#Wi>NEr%M>|MyK^SloNt2rgSXY46yKB`eb7I zct>cyb8xhpGI;{gI3cYbj(el=M1;K?8eYiTu-Hy&M#&^%MJbLGRTh!!TtrHsT*OAf zG{ejcu^xT?(D*W5izqTRyTv>{IWn)ZTrsrCu&UD^~p7XAtn zC`h0nfr11I5-3QZAc2Ae3KA$tpdf*Q1PT%;NZ^zbSZot&`LtKbE^pjxEs=yqQ`nF` zrGs09Pv5^-0Ag756Q5e@=_P94;}-E|Ki^`j;I^0p&`*;)xl|O~G*gp*rOU|+WjtH@ zOTeg;m0N6qOww%8viPq$fJD+}IPI<46WZn4jP{`R(kWlB5Lu8wK>`H{6eLiPKtTcp z2^1tykU&8K1ql=+P>?`D0t-r@OxVgV;PfID3!C^H#|i6FSlws4O^6jjCz=Zua*z-( z{uF)ymLj1>&Ts<2Z}Ua%1?0O*`;+$H+DqUEFs%)0?b<5Mq5ik}ocf6Rkb1ouRmap$ zwLtJwJ`=Z~FFI6vgP(|Nr!;XL2D z+v#?yju#!zI6m%pi(}TY!?9HUu6&<-wOlXD_J{0O+Pmx)+Y`1sY$t37ZMASBzz?KP zO1DXuO6N)&t-rB;*?On-gtgmRDgIvkn)n{^4dSGDwpb%-!pp*Ug-3+jg;}9bXcEdT zf3ZAg`LyL0OWe|9S;hZ_{}KNve>b1xef(a2Juh=;*C>riTm>&n-oUY7B;tw8`|M9Y z{GBZu9bTz>afJsrK0_j+3C#q0&lYw{+QCU#p)&fyD z5uXUpW#v#hOJ906F{x+fz3M{xR?HJ75Sj{m;>Y4C=IR0x*9FinwM*;jTo8ko)`ET5*LhPb#l0Nys4Ow~?FTnFUiy=cyz zB8j;p-Y7V=WRh0sRCqq(UNkY82>FA$CpI%_NVnWii=r_Tu~|Ks5$(fhyVOlRfrv|y 
z9@UK0#gh`Z9HMFJRB(w`;>v-+KJP4uBB-kd{mKhiO^4@#jNRzuxDo4_khmU5mQj%? zAGa?Ni-+d|7s2b9=+~bwaod5ulZX&L7mMj#(P%i@8=judq+B*Gab>_~%ohzr;;}KK z#L_XWc)OHxbUJ9nEIUL~FkdKzgg^A-#%1SWxGWu^->A}X=pDresU&>iIjGoFaP_$o z*9hd`m=x5{2~Sd=FH+k*Mo}9NNZcNv8N%;$!OJQ%4KGCNop^|4QkU+hZ57Uc*wxqN zne(UeSvx9m?U2vl{?Qn8gh4&(gWdqYGBa2^!pOq@Sa)c*7wX3k?8URl)(uPCR?IEz zAJV=4`GvBp8KRAPV4$xj0KQ`AaYmVsWtQ8pPvV9l!+z)^2g1==cu%mx_UUus=z^J7 z4oci6mZm=(CyZp~S~oyDAm+L&5Su;qtM}7aM`G|YTj-Tq`y_50N=?1VnXO& z2~SbT+8*S^E=6^0GCH_=`X```MB}lEj-kF9qj8j;lU3cRn5u4xtB1k{AuY%f>WQ|N zIe&^|&0dLXCE}x^+n3Oz^PMx$$e`V%s4Di*F~Sfqp)wP!*-f+AKa7HiMZ7*e-C(Q+ zr=Bj_dlSTg(H#i_nR_N2gtIVW?m);r@14V;Wt~A^y^D5@C^Y+URClwU=JCz>Q#2Kw zlr@8f%su8zHtm$SK6t}OA~xfm(StA`;_JENVK+D-`rT1I5{{V@)OSeSzWfPblfWCD z&rQ;?L*fpTB*CD2DjJ3|{E=gBZ^-W^#X?!WfZOYf8n56E%wexHz3cYv5;u27S&%o( z&0zi6bf}92B07-ho&*l{hREjUf+>+5*k=83*T{9iakkspi+#(S-Ox*rhKH|zI zIy4fWU8Ch?be{4?V`>MlUN@D$=_M97NL(>z2dyg>Cw=n4)>|s-AyW>gqM!~<0u*WV zQ--m?=(OdVp%&5vFl6ZF=#ot^T4A&?IT`7ec|aq;2&NGj;l*y4P9eOvyR)l*xGNRB z5rgeg4+sq8MS>qJnqO2WaZ4aLFgI-kE!_a~E(Gl*3YZaES_{f1WZOBgt83suf7j5k zk@M2^l%CXd%%ERdLqq8RY|xgj!(8l=$7u4D9%lQ}wKRXvG#uG99fIM@NV9Sc2pvks z7;iit4NN9LL`H1&YC7g+XgIBg%689JzjV(JFRiZ5L(^iS>AreLbyXgk<}}S?zkC1v z)s=Z@noKlzedA-*6?tgdO*H*4etGq(JnD7*<}}e4fAYp3SC{9>sLe!knWd+4C2g_h zmp3Tr5~HGw4)?*p5%4CDp&vOY}PXR5h`XQd^fO5DG1y3@%3F zUF?M32RXyo=&(P1_b)3d-*(G#2k;7E;m~B*8$~@tCQ)VIe|ND=`!K63BbQ=3D$24n z9rnO#Nd)Q_ACYgOo6Ryr7Xj-BrG8$0k9v(duZGkKbwuq^ zJJi)`k@8FBbISXbw)qB{tyfqtwC=a=u&%Po;;+Q7i;s$* z5FZrp7H<)+6pxE>aZKzLo5V6v5PmLvUHF9XPT@KsCL9u4g;j#x@*B(dEuXPGWVzRJ zrR4%klVutIEB-nDoBYH4JNR4qi}(-hd+%D+9eAmd^{Wu#*Qx%YT%b?HG07sp~sRA zK{XN-KC=F#GdHY2xhu+rZDyiSgsjAIArG;a*9iyBv2!}i13sAeVR__3*A%7$Anj#q zg}w}0|D2oB$4pcwD}_!o6*V`Q2$FW8n8%sCK@}=2a1t7*-|)!2y-Mh^j4->(Nar9~ zALBEYfqDw}IwAo^L57kr8rBS3jA2L#p$#r)8x0p6HdqR;+$|g2Q15Zc{VGq%1J=S z=tY{anvW$y;TT%SBJ&p`T^`H8xi(=54`Y}p3(MtLI05T;mr%|d)5lU(sN!S3V~Kbm z2)SJ363Q%Wkmat$uE8G0O}|%|e#_W+qlwA+9$k-^zToP>X_vbdnKIT}#sUM@WV^6c 
z7~zI8*;ys*;fJ8vEoiqmfbAwnovdFX)ES|M4!L=S&<+LUTMhSMqfY6R+}jkPf=@l} z&v-f~<7tyaSk5TtQG|y2>A|w{+et>(2z~tNmXkZz3H$O9Wi25)T7<(u_DYwHJDP;K zGe|}%A+=Q8t_f8ZP}sqMItQQNao3Yq%xmKrL0kI*cAa5tC~RW1lndK=L-);Dah}~P z!a6g5^ebNGZ9|qa^P+7GR+eX-Bf@$zU*sz;X6rJcp3gOybYqV)EW^`b3Fjr_5eI$l zrXpc&iU31`D}`vl3hw8a**uiV$?K;FYd!Z3F(adh)r>@ zQs^R~3)v9|uuJ8)9rj}#8|H3LBv#K|h2xD8;>BLYmKP=XBH1gPGL%+kAdI=(EjVM4 zj)40uk@;iT4#_jwV2R8hT@3EABp|gT6UDKW2Bmgk4r7ZXVPxkeg8C16T$1{wB8zMp z?nd(T@?$YZv8DJ%9A7bts0y=K7F$#pdm|TOoun2T+?ALh^*EOCq+vaRE}Z!*|J_3H4>lF+S9&4IG7?lwS}h*n|&YzIw?$Olki3ML==xfi^Z{f zz6qygFg?^Ee>0K9F))PAqM?f>}?2@nuLdoXkDQr%ytAq}IVLMX`@^LELQZwq(Q=m%2C_sk2H(+O_0d9ZTH>*d@ zn@rq|WkS0V4I3*NaW^a#>WnzpkE8j$9tSwG_9iD29`(lIb8Dz_=q@*7%f|K!L(!!B zSX;}bnE@?LmtfOLE#6D9=@?IJD=9J`$G(-l*QIY1#`#XeC~z8EejjcSr-*ay_q|y8 zARN=a+%!&;kOZ{O-Q!N1Z^^8~W#4KZ`L-;o!UCBN?WOwXp1#PvulF-7xfS7 zZ`GHyVf)?Muk5#LKi7V&J*$1geyjcUu!n!CdQ>~#alPXTShpY59(AmDR5?~SmN;Av ztNa)FW%-v{hiX;+s{BFgQ@*L43)Y1_$`i_CnqT>d^04w=@F%!axlOqRyb7*TlG=y0 zuV{bJ-s)@z>p-1zjdrVcrL)|*%&9r;PTuiH$8WR;g`2e_%Ed}T32PTBD-@Svb^XQl zvUZ>Pg8D=CyI`yMn)*0+E__D)nEFBW0rg(>F7U0kM~P?o5-s)yBCRR=4^LDzZC zuRFi&e8l-_=N{({@uT9y>ag0U?g87zR<&8(q}GCsqg*Xf7pYF=H?Cj0UQk|Co>#uF zd>gDDpHc#fPZ?JZDEpKirAyhav@4B@Td7g1T;F#+N&YwAd;C$BkVdwju_c-rx zzR7t5*h?;P9&v`8Q{Yjt-`Vf@y5n)j=NzAKe9-Z8#}6IfcD&c|PRH$zTO47qnhZGh zIJP@l9h>C!a;5!A`xov1ZvUwLA^Uxf3C98Xi}FL@een)?M!rBk7i=kewd>_Jd9(ae z`9I~S}MMScVLXS_~6CP(B_xk$Floc;IqU)q0a|4;kV;$Ov=?MLll`?P(+ ze!xCp-(%lyZ?$i-*Vrq-2BX<+w!hk5w!LI~-u9gBDe&xg)b<(MM{Ez;?zP=%d$a9E z+f}wpZAWcku+dCt@30-P4cPYBw%b~5n`||<3R{Uyv)QD-N-s+`QMwAeLyk&eXqo2)TJHsKk~dp#v|eSs)Oyq!woY3otOu+E);-qk)>i8#YmK$S zT4L42m&E7A=ftPPC%`WB8S#EGDc&XCCf+1oBhHJ5#enD)4~qN5Zm~mb7wg4Zu}WMn zE)r#t7ycmpN_au|zVM9jHQ_Pgv%<%OhlTrvyM)_}|DYe9@=p?Y%M$*pHo;=yZ|6VE z@P`=wH-DA48Mor`x(BE;d>drhv9cK{4R#y$?)9_ z-^K7d7`~I?I~aaD!ME{mWB7K4-%9W;{A~o^%D;u-H!}=c3CQ7A{*46R%-@RQY2a_6 z=w=dn4Sy5EZ(#UFhHqf_^$cIn@O1<)<*#M<8iL38s|lXuuVVO0hOc1wa)vKs_$0$g zhF{06~pBWuVlE4;T718MSeL&r4+%( 
zk{GxQk%fnkA7u-_6ccby^Gg_B%{zk5cpniXNfp^AvrKqW?g&nENb6|4z|oDEc&_johaY)p4IBMZBN;1j8R^ z_+t!zl;Mvs{9%Sa#PGi{{6U64!0^KiKg95Z1izPiKNh=zdw`<%QS@Gl-a}~K&fU-O zeGK2r@I4H_o8fmc{7#1NX810K-@)*m1mDKp!SLG|ejCHLGyGO$V?B2pdAyZ-3&U?_ z_)QGIk>G2&TN%EE;hPD*hP#R3H!yr7!#6PedV;U!u4niKLF@YBmI8Wd)0*?}Sgun!W zaROrqOQQrHCNP4qC`@38z&Qeg1kMt8A%Ot`X9%1oa0=n1PM{y5<|A+tq3T6AegT0K z1fGv@Y#iaiF@(w?0?#AxAi|z=2|PgHega1c93gO+z##(n5jaTT0D=7k_7T{Nu%?H= za|rAva4&&-5L$O5T-Qb5E&@AoELCU)e2j$bAaFZ@XA`)Mz_SS4O5hd(+X-wVu$90T z0-FhJBCrwjZ$MCwU^9YE2s{Yf2sR?9L$CosErRt3Y7oc>)*^uQw!q1nhWRQgLTv>? zRTBl7w~&A$gt7x+(Mp6X${?2G7vt~J?Q@)nzm}Acrusbp3G%cQ;o>D2#uxKn zBCV09V;BE8J{9qglBvh4{U^=9;iSP|#+0I+KpQMItR#C;!T^E%wt!iH(tgFFJ;@2$ zowy>Y*1oIVseM4ZT05qNv`MW)tJdtW?*EPYJ@pH)j((SVmFiOm)m>_nTBEL1HRUDc zS>>b3HA+-D555IxRn{ny>ldykTo1rn_lRrM)d(xwa#xW{bbjCYAI=Ay?{Z%5JmB02 zD|sGv`=4<<=J=@NZpY1FHApyS9Al0_Shvsp_8Qx>wolsTY<;#JwrZPI`nB{4>7;Z}YLJRy4fsdv&#m9K zK5qS(^=+`;i&%TD&DJvUkK&84viq9&S@9uQ)4fT&UW|$th`Yr)aS{2V;B&BAyHQBM zPJgqo0)+o_%TtyQTJEyE8NMs%CEpc1#ea_f2!9jb#b3rx^9Mn4OT^Wdj9q#d9u={S zH}?9KLuFzspMSUCx|>iM4%g?c6?^#8ZSb$_a*8E9-QxEhUn17vZ__6K_yy}A7xRvK zzHNRO|0=|78Dtq7{VTZ3-QvNF_)~24ui!4L5&JT!^KJI8;7(SHof&l0c%0tuU%@41 zv6!P9{)@y4i)qLIHty}K#V-DooBk`f*HM|6xBZL4%S1QGiG>+E`9+};aT5faKCU3f z+@h5$#OBQKRFXMTN9H7Xw*SW`mWz#988Fwgirg6HOIDs6!`$aq=f*Inx$4{)<|R|gBIdTWXpy*@H#-IRE+({j+yZ*emBl4M3(ifV zLtM@=*8o*4hd{%%Y-y2L1&=B30N+KLSY}E22f(~fdJ}UI7#lY~X)9b}8E3c;$d%$Y z%ffyFppg(p4VM6utH8Qlq$L?Kbl(X)PditOd-zkk3-laVCT`*k*8tegX8x!3OU1fW zaLQv~rP$6d;4)CtL;6L^OMttLdz&U!K=FoaYR~@6=bCd=u>#64;y1a(}8gLfj4wh_)Ui=&MZ{ngLvwSrqgR$(;yl^3-NpJorOXCIj|ayHm|2CHFaI^`C7&j4v^`8U~n`j>#1GPc=a z!9vDAW20U5k^#-|zGI|nNC?|=_Z?j- z*78}q?xoRUvCarg?YoyACe=h+74BA+Mu=iU0v0bx|5j8o^cwr`npnk|H{eO6u>((r zt<)BL(JU#L?V=l-@I@CE!C;r(hCjqzNs^^D;t!5xKC2q3vz*O%-x2b%ncMN2&s?Ul zA-`_R262>6?X;ir1B~{Kpg5&SPrWI>uB}4cYFThw{#Ncyc5w+ucIFAMnOpO|1nEc% zZO#|XkT!X$?fIgB6J}dugPzO^bcbHF84tvh*)A^PVVB+_RDt?NHrf~ZHl(#u?4T4_ zjZCjIf8^0Zm^#&7{kmqNQ=50|ui@TMC2qGEyY6{5?62W&TqU-rD6;qLui~;Tx|BpVxmR 
z5DP~e=KOQ$BT&p&?`D0tE>aBv6n*K>`H{6eLiP zKtTcp3H)nHpc!s39IZK%Yar`eJZ+x(#wJg5LrZ(JfXU9}ipk7maF^d`&6!+ZnUSmo z?)Mw5J(JrUNwS8|e8r3vXSJKtTcp2^1tykU&8K z1ql=+P>?`D0tE>aBv6n5$F+!~HT)}H|BEEqD_;K#B-txo|63qgt^CT@|G59}(yBP^ z7uvVA$Fz@V@6>M8PH3~*L2VD51W;9||AGVx5-3QZAc2Ae3KA$tpdf*Q1PT%;NT48r zf&~60B(T^eaTcDJxa%x%!v4eG+4_NR9WR2b0(hC1E{0?HA!PjNnIgC-07EXKA^nxV zDuVj~Fysgg>HcZ@_}?f2NwDzTbuK*6?_wvyi^v&+M`ZkcjGX*; z)JBM|mnf|jW8&oSzl1=8ETX{6F)O?uul)(%vIcAbUTq1So`1iZP`lMd%2UeQlv!o7 zVsky~`nW3yXVtr$Uvl2%yvjM}>~yYmDsW@p2OUYr1&$q#YB;s;`|{`D?D%8yd2$_` z*Y_R!C+*kUXYIS}itT&0>ueF*F57D9_tICS_es}C=Sr)szqLMKy#&r)-)NP@Z;9^^ zr^I^UmqNhu2g@y%jr?!;ck`RMr(azK?C*1Md~=wW@fhGCeL8@b+>&b*4g}&e1AW8! z4x8{87fj_#AamFAht|h>ogBX}>CDWSSsF>r5G+#8C=Jg3OP z!d+iel@xbFfd`W{nFT^A6CM2>y$46SJBKF@3=H)Q4|a5RP3#)z>*(%(g=L>QGhkzF z)%GqBZpA@3p%jH{aDQgyKn9f+jk_R&vM!nd>o2ED4vEU0ZJm&)xtX_Psrjh$g(=+i zWzMFZkg^^wf`^pCP$Co$%<0{JxE&=h6@Y6sb3>fmb&}Q70jcWhXsQu!6pk9&4Hw7h zcw$l3BhXu|YjyyA72L#%r3@tEkwkoCKBA|80hMT7zTLseC2*rH2yd4@nV2R(j`Kf| zo6m-*#zqp=h4*zrqwNR=hv|iwu{=SL>}lD(4Uz$$n4>|%(qugX`Mq-2SwP;|nL)na z8x43T;p8Idf4lT4xY9dSTAnD#Mwi`N)1`KYX5eg^U2t|6_S{(3BarWs8@B*CQ~@%% zM~`=g=itN~-QOJ=i2C(tZU`jWC~s=Lrij)yg#*mO$vOUv2Oxb!u5C_}4n_l^ zxKULZ4?y}MxdJ|tGm4#<2*8a$6Yxhd5IiJrXas`VTA8#9CP{KtQq+(ueTs7r+Lku} zEo(zvbCLKw0g%o8^1gbA8W65k0uq8-v5@cItYl>zM9?`?mcDp%zX@ z09n3Mc5j9(S^MhOk3rpq{BWgrIOGjxJp%b#<>pO5ZnR=rmbn~Oyg3Mx-6F5`K(f`V z3FAX>C(;p}@NN1DWY^0TZXjdT*WvdQ8Uq8t)v{+J5Rf**^WCvdxWI1E8=vWbJ7tgP z+0SzPQh8OKks;3;6=GmHeq45MAo)SPkdw8HH;1!FeX}M>kmvZtwpF#K$}TRu*XPZS zyoV_}@*I!ZR@MM{Ss6$Lxr@c4*lGxp<8$(=b*C!q0=afA#FI9~^Sc6YZzxUrfPdnMHyVPuAnQ?%ms^*W0d+B4FAi0PM-AdXIljWW zas`C5syqs*;T~{?kIk7@swFq(}!%3drpRmhyMdWQr!`?eMdM?WfrI7WQu-R%f zAU>UxW84Sib`=O)TS;{oLIEwvpMX)yZ84|o( zZg=ID)g3wl^8$FJ%}$b2*~Jrg%4%?OlZC*&3fq(oH> z`amk>)btc;2N?-XVR9@XZ;&A!>y<{jj$U&L`XtAuSta(;;%~ zT)Dw!P7_B#bwl@z!Z-nq2HRU^h#c#an<}*_{kw`!E{)~dsL2|5B?zKXyzfUub zHOL!9$nDHJRjf|lD5R+mL<6w$A>9hO&3cq$)$%I1!)3uP5R>F89^$L2NEhhnKDT#p z5Hm7Q 
zQ{|PHTpZk7cZu_6*Bg}^ZS~p?twz>0#de?i8*NxiYPYE&`IGWu+qcxu!+HM4wNI(L z$lJnR0gUXEjqTFdKv9Gq*+b3N~ zXP@nmtHSYThZSzXt8hKw{I0#!S?xGvf1};zY_|_6Kh(Y^-)Fn^)man@sR|M(NT48r zf&~5rB!IUX_jv;$?j&AO4%!7yoTxV(!F$gQ{U(8tu!IF&F9L?p`8Y-zE6^B54)*Lp zEd^6Y@y2Jf)_xeT7dEm=&-1Ye-UgmE+lMjCpxvbtJt>5j$s1w7LOdJ?n-vx{w_4~) z$I$gcwuT)v^E!*X4JSea!@w}+a3Nk(JTx~F)%5^gPuw>P)+|_g#%A!AIgP(M zXZphaDf(;RXh_FjBjJQ^#*eo_<1*5RKl;EXFo{1<-Snbh@CU5!!6+ZPAX_jKWx`e- zR^`(=*tDo@gYzi-p~1fM@d|9?rEz(K+>YZN&pAtuF}%;7nwudZT{eRy0TUd;C~6|k zjymrQWOvXkyK|9!c+M8eT!LOo$SQ*#|L*EMYyvEe*z zV(1Jw-Dl=>FvTetE|Gx&=*c!{=6JPszAXRv6&Tw!*x2%zAvCEL-GeBCcHf<0c$9Ojbn?@?uyJ-T1QER>F;DZtIZ3 zWT+!{zYS)J+AIoU%3F`Egjw!t%+zqZdTo|%Zmo%SjhS{eN} z{6hJu@&n~N%D0p!l`kouS3V8541Pd)pK_1#4&|-Nt;!9`)nKbQ4z`Jia-rf^&R5P= zhLm1qx3WXoqBJQU^>ymJ8dYaizdEk&R|nL+YKOW-ZB*;j)$p=~zk&n`5-3QZAc2Ae z3KA$tpdf*Q1PT%;NMJz;kek`B=S~uj>mvjv2#gaLBQQ$fVFDurh6xN2I7eWRz*zz> zM0invz!?Ik37jHOC(uuzkHAR+y#!u>@Yn=_=My+i;24322t1F#g9M&S-~oh3_Y*iu z;0S@k1P&3nkHA3!(LQM9*vlswejRlzzm%d&C^|vWaf&Xc=pu^dDLO{cQG%}9Nl^zy zJFvL;b^^~Pa2tVV5xAAWEd;g`*hXM0fh`0!6WByxBY_PB))Tmyz)b{t2y_#;k-$0v zHxO7$;CcdU2wX?tS_0P)xSGIf0;>qDB(Q?ORRoq3xRSs!0#^{YoWN27OAscO5m-#% zQUaF{xR}621QsETX#}bSDg=^G0Al15fEf7%AVxj`h>=eKVm3k}5lB7&h>;HfVgdoD`B4S>I?~ zCcXtM4L1lj%heVq|9Uu9;MjtlCvQa332tS)g*$13?++~8bso1px7w|>HvG7#9a(^I}@CGY~;|YHtJl3$ezI_brWO_6b4aD@ZzG1StGR91fV><(( zbXrg6@QgPa@vzAK;5rOmBc3>M4UUOP5!~2-jpY14hMPVwxy{kDQwI+gJ;laxaF#IU#PzkJRn0_+Ux z;N|N}OCf0i4@6`m@!E+YO9uIv0C&gNmJOa=q78R+>_(CJeZavYp0 zk%xu5!rxpU;A8Gh1mkqW20e|_&EW(%YGUcHVQXZicf?})++=VbcZpLke-U_tV)6QG z*9x!#=;(vKv!?oae#{%eD69i|sv3$NX#`@0Vi(L$E@yK5fSBw0;qFmtf!?7Xw{MZYtK3I?E3Yiapce`CU9Dd`Xe>wy3bYQ z>~Y*6UuXZeO_25pH^H~~LzZ*$F5wG-1qtMpfcS990XLuA2V;VMYe*^l?uHfHjZ|_Ey%V0EoBmkd%n^PM!lmshbsB3rEP*;Cv*Kp+_BL$f10`d9r^{D{J zqBflb>TPsz7YYKz(Alx8DT&9y0pv*cC~@Amc3#6mFQ6T=)x~x5Os!wiMdcNo#LoXZgSd6CZJ!z7xK{k z38y}92;AX_!EYj$b=u3a30;}JEIWF^d`x{VI}NnG+|tKuFB^4`9(Uz9>2cYqvY(xq zJuW*M$~Sskc9gl#oI#Ic6`GIdoR*W(<4C!uZ!mvbdojAT`K65Zp23Ylm}q)$%7b(h`kgF~11}MgV2Z 
z!K!t%zkA%ZOty26K!NLRrolNK&zEC7UzypXBVe68M7lvaO^Y8ky1T zGoR$lHkcV{YJo8dYIjO+ELahH}$G#k{GLOi_KQjK7qq_cAPzL{bf^`b1+q9HW?}m`DW*hFr5(=vJ+riPVSU!C9^Q{&8J5`Q!ivi z=cY7`eH&#-hFhSMoe!(2YW*Iu?)q=R&4 zdXP3rE1OSiW(UcRHg%Egs8`ojD9`Cueg>Mzf_0D{+{rkG`V{`X3YFk*8{xaj_x|IR zJG*n$z+P`GJ}?>6qepa5i_#-6(-te)=Qx!}AoW<=C|nqZ+$TRUixO5h9pgQVre4ug)S!x0^a6S(!3^(bVt zo2 zlk@)Wt^<|Xah2Yf;yBPfvKRVI=jhOI_x`S2nvP+ns$u^ch9ySl2ys0yYunxa%8ubm zIMpvh2k-W8tX!xABR#VwQW83YV-g%DGNR)%mAi)q`f^H5_gv6N2Kv*Fl*{xpaz5zE z!(JxI?qSa>XE!Gc!v68&l{I5Em4F5fcM7betk+7XrSF$jbx1v!rs?kQhgIk~u{lh8M$B{d8cX24hPzF z9)_65YcD^xm?*H<)Md@l#wUWbU*S`6<`|jIjpU!Tgd+1iM!dQaGo5*g)$|n0CaiSh zdKJ^`-?;KNlx6eVy@o5P^fYjKJ^odwHEgmO={h&la76b>^UXpFSZeS?X*ja(3{v32 zpCDGwBJ~T~gJF0~w~bWv(uF!piXwkjdAmz04cH!1zbON-U}z{h z9n-@OTYJo0R1#(=NqqPHR+KVe_LKEx)FrC419i!IfHJ^P{l&@v1Bxf1?U!>}&k% zZGPbWfBThtIqi?yZ?yl?eyn{DzW#qwdkpRX{Dk&z@a_LS+MU{4w41bR;XD1~T0#qH z)7k}a3c!%oqwUhRX)SODz?`D0tE>aBv6n*K>`H{6eLiP!2f?GKtGe;PthnvBNPo&G(^!piUuhf zps1gsK8kuN>Y?Zyin=MKUabrh|oXbnZHDXONZ zilR!2Dkxe-Q8`5`DJr9A1x3p#Dy67|qGc2nQ?!(#B@`{DXc0w46loNx6e$$BC~{Kd zph%|3P7%HQ?>gi1KN=`fWL*De6)ZA$9a;aY!<_a%+RNInwHLLY!U=%i)1J}3p*^8} z3C;lgcd!F|SbJD|ADjYsxAu0h1>B2f!xKr|s1` zwX?N$tqJS`8??1rrB;peipHrWO69S)99|s%3XVp)_ z8G#>AA5iZDJHZ`rO5m;Pjq0`P6<{m4SUsX1R)gxaIti!woeRts{t6N(NT48rf&>Z@ zC`h0nfr11I5-3QZAc2Ae{x?WK6gYlAf>8t`2!;_1A=rmt5WxU~egu68dJ*&>I0r#D zg1rd#AlQwd3&Ab~od|X!=s>Uo!FB{^BiM%EECgE-Y(dbDpbbGQf))hL2$~QyB4|KR zk6<%`O$a;)+z2)zs6(&;K`nyy2x<_lL$DUX8U(8mR3oTDP>G-d!72ph2v#B}L$CtD zas;IaN)RkVP>f(Hf+YwRBUpr>2!Vz`MW7&XA#fsaAdnH*5!et&2&@Q11Oft!AaFAJ z|694oIPEsA+um+_9)AAc{7b&yVs&yBest~0XXm+E?mA0LlW%iNqp#h)S#PL!H?{cM z+}?)fNq19|KH1hd+0;Dc^LB%eX%NmIg9A|Xi_v?o8c+Lx8>8W_4!t8f9jo3_d5C(| zRc{J~ec{klV7i*QfK_iwgks()-JK4ABkg>%y5AHM3gg`yTk2ak*KY*SfPnr=rQHYv!C1qzGY+x?vmd+1Yb75 zr{!DmP@MtzuzqmR^s{lIKAmg^t_Rieexk_y5wEG}sNqGsWvse$%b0PF2R)~8%s8`a zECR<*#m3CXqR?Y}JROmU2fX;k#^8_+INN0m@*B%KKf{!Y9qR<=c8pc0dHcumK+l>4 
zpZ$Oj4pxCP8voaD^G{_24mN9S@W72gb8wi$5k2i=gHPd}i*?!>&~|A|cOOiY1m{XeAzFfqU>1VMV!b`1Jxh<$7LPZWunp?^X*%_ElX zxj8tli*pZ(GR}?l9GMw)e_PX(zuD_<(sh^_o7&pl?S8-CUGMidwMBG8AGk#&V%`29| zY+IOZ*`QvSZBvK*jpdo={<*U)%&_bGVK<%5us9dKx*67dR-g{&`$2wNyCL)IEX~c6 zt*ySMdUtzkGtAhHjXs#Mo2T5(dRu+-RDE+pb4vrc)Dus%9){zVZ@C`h0nfr11I z5-3QZAc2Ae3KIA~BmwI2Z#{+ozvBE8*CWnyw*Tk4Q~i?qVb@x9PQ6;aQz>_SPyMd# zQ?`3uPFKWrh0>({N(m~DDercDSh-v|$2H_?v;EZhW9Jjj55Y}=*Eo=|US+BGXS!=Dl_^kLr@iuYR^#}3n1^%lMHQ{C9yTT*F?ZT|kCo~D=mcLk@vwYfe zizRO9v8>|%!vBbWl)sx#@;-hqzaFe9FOiC~EvevT$s0HpggxoVywCph!{6Dm;So!D zRD!E0WC^aOOB|bsM&>+zIB~4hh+cJA;_6b-{>bcfX8iJq#8nybnNb{P`F0INfv15! z9raFm(2JnR$haaTac)zr8@ALk2v*NYTr(2*-C(o{#od#Ecn(onP=dQGWGS48PlVyB znv5JuXC)3_dNwhsXU43$kiHeWwj)3;Av+dNF;^FmIJjs+n$f+%_{_vy*sljCh;IQo z%inV$Tttx~u9=azZ7E{7Q!EeG!-?(#l5g?63t|)c1q$pARq5VbM6#L z%pLJY;k?mI(h8jl&j-dqIMj2JTovSr%}g57E%(!+Xv{=xRu5)G`!L!rb<=B0$ex!T z)r{1|lM=TaqG{?>aEVvq%7MW??<}}k1a;M*UwMJVk(v(Afj8S1myJtY8SokNMFWv| zY|JRJbPOxrE~Rc|IcUTzJ492kds~!L_<0yEOGoGhnW^yNgH#eea#>s|*j3EA8-W}w zjzRsL@MNqroCw89ZTA>i0*$-}XofZ(kT~)KPlAo&p|9{}v;kx!`{-ZaeBdW{m+q&H z7W`Fq^>umX{E#7ySvx9m?U3Q%{?Qoph(SH-gKh!8jB**-2qO#oW8Ival?>dKXjG@;pi;9Cmi_N zr_Y6>3uaz9C~=!un*MN{Fp`;T-2m;0nCq@UZ1&Wz-cMg0iQ&GRg zF^tgCbud3cs3#JL=#YyZGh#Y+NZeuIF9J?z?kVsqbNl>}V{UMAaYqud8FxJF_66Kt zU)12-9hk$;W_rc#+a+%9jItp7=Vq|}Y&z5#%y|+x)EknTp9`iWbzqx~OX3KXpENu* z74QYTK@VI)H34G+!I2p_y_fv*(b>osm;eVlzsH>My0hpwkeM&72s7R2R$!nchXG3X z>Wg2sC1WObhojz@S;UR)5;qE^vZ34^hB}$igHT!6%(0TZQJ;IZEhABLo5US7B}&uI z#@u>n8k&LI7lg?Ngl($M4Xw0$PwR2_QKKig1E7CEo0!$-&9t>Gbh1WT)Gu(xh6$fG zq}d~8(z<3kgdwTd3%NxiZkV|Jv3$gpO>{&gP1|UB8GWX_(U{ty@w=(~O)s&yLE?&` z!N;N78BM*S9x~-%Dx#N%!3G&#+X6#?K~TOKY9UPkBL%p)rK3wW!61dv#++oNTjl|c z03(=2V1yUDVLFBI-tNw>{^71v@J0-_OFbYkke3m>s7~URKyYAg+6Y>@fp+G-L;*8G zOKY(+!@xbTt83r@9GyID#4KG;=}Aqey78cN4qgVyy`uE?w%lxkVp0_Zh#oLOY0 zjCqcvvMgOoS@2AQCO92}fz2RZxd!S5n8O%vJRS{9CP0ct?2;m=78uB|nSeq?^DkXY z2PbSmQE;(C@{iqB({oF>*aDK0?NTI|fYzGIW!VZ;Q&I{~DkJ5GTu~K-heMNLaM24a zcRY+ 
z#!^Cot4OcSES9c#)G=-^PE2No-3$x>KG>vJRdsjWRx@0L;hhsWy zI@9-}1!$|d_opT;kkiA8n0MvgC+`5(_v36F_#Xur-EVekqTZcLW9Km zkNN~7hed+KErPJ=u*C>k?4)Gg*_gu!%fl@dAgm65hj~(B_6E0c4LR+HMge&V_Tg09 zpuxr2471?u*Q3FC1(M2t8V3W?=V(yoTc^l&q~1Cet>hNwcEAc#XEoIeA0Qh^9rZWxdki) zzjS=falhjV$9axg`LFV~56N%NQb6cK-g+c|y z)4qmrpEXJ#v1acwN+rr{F%(cM;{^dr=|^iyE@ZH?7k>u_M!U22T~qtuUcN!T5awDDMFnygZT6 zH1~dkVtEo2AABQN!TaZQm~7K=6-iKfFf}zl%;%$4?5v4~16Go#H;2AD3F=LKR@T{>)+a%&8Aual zu<8!OsvF-Rlab9yP;~mzX$&fN0z~g-6_A;vISIF5x+G6@RO zT1W?Gstjq<=*lFhM{bA)GebP!gFEz0)yZ-QGYd$Tt&7Hby3n&*t%Fz3)#9@^g^~S8l7Y7qQMK9v}o*^ zo3to>LlQLTeb|BwB?eZ^G5F{x9)r~k+8CkJ!rGDz5j0V4613~>Fa#}_hE@3932vpb1!QkMl^vb*`m%`D zCqaA10WwA8*CWC3JicreX-#r9te7)My*MAh)RaZJDOn9Blf0Cvl^vNiQ}17utO8}) zoWchm++@EXS0%j+_Hi59kS;DGpSAoV!x%!F1e;Bq;SbgVLg?wED7ruEYgEZc)EN_ zCta5Wb$=^rqWNdo^#*S%lc4isV}r_~hFLHlbGI7NRY_QHn1x82WCl%5av80}KnPka zSP~||kUoJ*5iFZCC_TwyiR*^KI2mH4T^%n0s;$Jnr3Yh7Kzw($l`|FIC0b)hwm+;L`6#FwKBPg z3B|M(!d+t&RhEPWD2jdxDjV7~_Lg*PZ4%bN*rsT#o2o?St%o%2x+K`-+S9Z!Dds1v zNGeo~VVT1IO;)ihk}ld<2BTq^dgtJNbna?{Y2aLvy-KNw6yPWu^1axoKKs1U08vdYn!N@o+d8 zgDpckG39KE&|vLJZW-A%8(~V&NatWzGU6j*dFC~r@$7pWE6o(tArrgSoQr1Wu**!HyRvw|26K2~aD-i6~ zxH(IiDNj&a-%KQN3_mg^Izl?SGEOrvY#(gz&6GiD4BKbszAwu`~z30b=Nom$Y*ZuiPt_^1bm5OeQC9RIpZAZx9?loc;a6s+&IE&*}JuNR?%3Z<%P7^D5 zKJEDsTPoI)VwpJ+87P{r6gz-u;jQ7KS}+NMeO6>14-XhWKAX=zIXAcK>?K~ zinxJ3l|^uUqJlnA6!ksz>HqsX=bm%#ow<{?&^r11NtJf)J->6#@BYsA+YidS4#Yd4 zx8Zeq=je6xuBo{rE|w6~M~$gOV^{oOqdC`=SCzNQ^__VD1s!bAf8^iAniU!lG)tPl zUn&1&wB>!;3rUmqE9D=f^b%Ed9J`lfy~1I8@ko6d)@XvM;25=1^+mSgs*ZMId|A`v*o2v&c>lMy&s1UAmNVYWt^k}Y(xFt>Ui zs_-I?St6!S!hp-k-o>cKS=+32rE02|l~ysvw~jHY$sORW*uomCPGcK#z%)e?RwpV6$W@+cj(;Tw_z8=m2L3{Ri7zUQ(lD^b(K9srXRtWlPRowotB^r*CCr z!uuXX7o0VY!n zWA+PIESkeGg?;VOq+I2enR*rIW(iHXGMRGKH2oC!lbL4EJhhTdH*+^DKbRyd_I~U} zs;-9Ez9EX(aSKu#$_s2aZRtq#nrowz&HQ|-0_E1=V*1#&rmikFRoSn2hq_zcGa5UZ zhpZT+*6&Ch#4>tn2F^`Sp2nDFU-^7!x#IU!Rr1fT>`G01^z|MOvgN>1!Bn+1rm~rK zUD?1ywo2iRBA`eu%yiP!!3y9 z5d@C&9ekmvj?A 
zq`+|m5Rc?S@ape6D$b~V&~xF#BVJl<2@ljtA|Vo>=n4xhDZV>QZ{><#ajRuBZhS6D^9%(7O{9DTN4o0s`0 zPhz^ZH1&*DpX0l|s}RXfHy5eV8nk1i=K{z{$3v)~@0UuSS~D&Yvpb_4}R;DWG=H!0xbcGD?|VtT*|^r|^nc%2X-qY|fnRuP1D_Av5qNiCXJCQnQtf#^pM)%z+!3stiGr59(g;~5O8Vlj5Rhbeh5@o{gV9t9hX6V(bI;bDs zvawJOcu0=b|aFUB=Igsv)%+HlKchVC>^_Ps*_O__XzFfn#Hk0v1-CxfbSe)pD4`00 zfu*SfMvc~9@S!%S``O}`18Y~*Sc;STD)ikR=jdAj5RF`Pu(kO~?^0!~US@rdaO7M) zPv2pCJDB_Cs!941kH=KP!(G~xPe8^i)vK5w;4oe&*R$PY5R-*+9UB`HBF9jnTsvMb zgIa-EL@2z(G*2kk@M`THa1g~@=iw`hJ6k*Xkj`ZXC$te%EQi}FpTig&vs{vyLP)aL z&#meQ+5R>NEbp?Eoy8^?7mOQWhvJ2^8Qsi6DG2bg5_|hnk!K=B9v~cSmhYcT?F2V8 z=5NL*yMe7hcH~*dGub~N()k!0DHq}%o;4(157I4RAbT z=`F7v<=bqGI})5{A;g;1!%Xeww2#3qFo`K+oUfVLOu1^LOkctF2%&*L z$P`=bm8JUbVe^;2WPl{zPPkwMv=h8N9^TX)(Z_o{3`}`m`2pJvXkD_&Gh#n$xyBh+ z(-P80Qx)0|R7Kefk;Ew@K&rtu+L&O}}dQu(>^JKsMe&@_75 z`kQ;&!4CW~@KYt=0SJ?@3A7`gY9zCUDf4j!UzT;Dm(4X2Zo$q%*#N~YDIEBC9AaYx z6#?*ryEz-9e*40gu~g?9b+-LZc~RB;Q}s2f%IBWR&zJ%#i>GQc>X`bA14yqR7k?k+ z(@Z*Xdg~|;vt^mrpsT!(DL>39Ypz5xwj5xmg1IEzL%rHd^;(ZdY}H)Oa`B>5USS_F zC>-&BFDpM3xI%(yl*^Lo*V(DDge__FP~rS;TQ^fIj#Vp|aNsDjYsXpr@^BRFx#)yO z9@-m(k(f#jK7$ssX<;_h$lSqr!6LT0L;edFE7rc)xb|i%Dg+hnYjUKdN&3zc1K8ea_o!wDmpgz9a)LP9` z&vf!K_Cwa+}Yu`!#c z4etBLGh2R(Dxr-W!`C3>}=&UF|w|m3gHvxP3wC;ifVCIE&ueg$w15X_Q8>J?}Ta2 z%uzTBZv1Obx3TwmeSVeC|d$Nyt$7ukFm2@;Pt`!eu zOwhuQ8BES29WT?pWQB392Z3C4_nz@Gmb<9o*azVXH# z{?R_&`&;k#j0=r(jMKH<+CnYt`J?9to=3cC&j&mg8s)~wz#jrX^FAK z?;%8Gm_8A=hc&oPkO2Z_hdo}up^aA==NO>|Old7!xSTr~jv!{4WRiqrIcKI;BMQxJ zLvS*%(DP%wl)lzgPMkeKTOvy2ZbSP@iLKdeUPnlXvxwS4!W5c369-3QGi@Iy8WZ@> zpfYay%`w*&kJlDiWtK!Vod&|)ayV_8RymjfZ|X>`gqynryD(+CRs|DIe%sAt0>B}c zSM5G&hPM7FiuH*>t;l3n{pL@M5-unmiMJZjx=PsUASN<>yRegR-TB09R;XT4Fa*zRw`x))|GILdyyv4PAU?1xDGb(Zg#BwqcvC%oFCZt7PSjcf(jYgUgps*Q!}@zJT&& zG?VS>((7ri=GT>8$M$Wm3C&iNUOPrB6IG>_^cosnoCxt`j!*iH)Rk~Us>*T4`XAZ@ zvx;q!JoBpf3r%j*RZ4lLM5{B;X|bh|bd}{4_0~LXr#qDBOBhm>KaS8!96n8#IA1JZ zdNG@sS>8*^Ct_N$%~45tF6;TLM0C5`(zDqwo@Prw*YD^Lbe83J^ijS-b39ME^5uv& 
zO{GnO4abDB+5-7xs?fXpQ}jVOJ~j`H(aP0qpT_un?Huw?Aq5JtIb~p+A{Y*s0V2p$*MV?FAvPtHrtBxxV8%wPrD1w$P_tk8sC2#X|jQmB>86EkYrYsK3yso@JtcJq_=w6x{RV)lXr^_M0>$M*$=L z_WMC?Jcm=BkJ8+>duDxbytYK81Ig0!F^zchU+$eq=tyW^vPe%DU9nMZilpn=zPy%#b z(E5}9#Yx)UD#4))5-fBJgRxuxbDad<%z?2sN#P^$`*Q7^W43xL|3$OZ;nMRh^9_l~ z%oR#`ijFnQX~xB~=6h^7rSDCaQvhPXzst^jSBIx>Fh;`7+lj8<3pBON)@Et7`8GP? zQlLJ^SW(vrBUovNsK|!S;o$V2LG823HR^J2cuAm5zgfS+^GB^&J5?JSdoA{K?C#j5 zu^pZ#W79qFiT=~m7=78Z0+#-3J;7*?`g(K+^6T9dT^=19c|+}wd^_?$3UpcM|U=@trQME zkrzVf^6n*sB+4$@+&J;>G2wF5p|Z8RR%yKgN}hAa zq!9hs60LpEnnDJ&X*EAex(SYx`XM^Ska8u3D;tU5;$!nZIxINEU>d>CSRZGp19O7% zKTYhI*_aI77ic--@IOU-H~D>#ii8p&u$dr=SB}p^bS{a4z3?@Ld$25gkTe0* zagOd>f;>PV<}7d;h9t=Swn3R^`>m1G=JY~Qzv=y(q7%PDhU66ljQH1zeF=3uNp8Z* zu8yPaSB5b=;;iqX?blWW{>6-0zMJ?@S&|z~{c$=Zt@36OKz$dfBjkI_49TO3KSl(! zN}XZSAEli>w@?P4Ft+I;o%-^%JiJBx^Bs)AMX05su!j z%ahh{^F(bG1IXp7H_=L^DJ0iL zrlJpH^ffxM;ct_WAv(&hX=T^7cA75N>qV$8b-0S=HOeX}zv6Zguudf_X`-N^MysX9 zrc@F+JSfqW@&--&JjKNC{+6bnvz)5^H~Vl$t1KA%H?*?=yqs$cRNZ)rpWVQq?ZbZZJ}lZ2xTTi()5=G(+}+kIpX(@b@G!X(=!5#r95M;(#+vryU^vHHF=6s_3 zlE#*SC_kW&5gvfXtBS!kFdENV?4K@fFw1CqTbsHiPdI&GmJ_)xFy(i~Ulr<#TE%%f zKUlAXshFHz=)e=drR{ww9WL~i$))>g7SV~21ZIX}EWiOQ2o zp{>Gty3BEmHPD7&7BZ;|x_jI_Oti0Ycn-DL`VpxSSI^OQh{}VeLtHga+bscGWV%E* z_rjI57n=x`b2%{Az2y};_Xh={CG$htdvOB0I<_pKqt;;}GQC%5bS_9%OKZa>bSZIH3d9W-nI{;vAHi-9u14icUVvo({e>EL_ zXkKz|(q2Qo%s0PO*g$yfEDFkKl${<|y|g!S1TC+X$EX3mb69d8&LGu4SzD9WiK>nE zJ6?;4pthD(v$Gnod8KT$7+V~eu0jWmSuLH&vK1#7I|bVv-4Fl<-||K| z(?J0)Ye>Uf4efW1#+%ti!qo8wn^@7AM+PCd^`sIo+n|MDx(GTPL8@sf+hB6oQ)=i8 z8$omA69QPbRNF6FkrR9{(S#~hkyvha?OI8&{|V-Cbh^bJ=W}NGoHcD+{DVH5-2lF4k$ozxs9-hkS@U_6hf$IVX0=ojI1!nu- z3V$~I{@5$ACu6tA24XeQ??j)7Y0=;4Bco?T%lyy#A3`L5qkjpq`u)Q98Q*n?-Jj)C zy|2K=ezA9xw@Cky{u%vhy;WbMy%N1dyIy@+y;r?dZBtKEqsq(CP~@t}n~@(xo`~ET z*%K)b|17U%J!ZFB1GcJwnQSu5~#z)ud9-@CbdYH zOMCT8A{*U;43f-HrlQ#*0;>dAni;gDs=a>A3(4^L|GW@kRQ)v z0l~njNT|uvKvrasfreVo16*ELU*o7SBZI6ogtLo*R<=4bgJd*I0EA5nHxq~yO6<#2 zP4qoGgTyqHNJk1X5|Csc3vf{escL9Ma{$Y23Q1?3nHl7*Arv;ac}fZt=4Oz;hVuS! 
zD7Pa+qFa(dLK_-!cPp}lo0vf!n_?u^u^3_HMbY873{uw=p^&dhs{qTfR%t}*$Ls;3 zye=9=-I!7@>n_P46S(!rWfn^xB{YP5o}m#lT(b}q1>pfCC#IZaR*3wp zG}6&HU(uZpFq4pU@GDWXB#p#0L@}$T0Hd{JgF}uQf^&B`k#MuqNXJ7H#s$vN;Hoqd z+z>JCQjbktN-lO5-@>BpekqV%oJIm1N>!In9<<4gZ;49fX{5%Xq>=e5abipOxoPCg zp?N1vbGcT+KZ>?1(#WAhOEe4C#@aBGp{y`7jl4QE^*48?Hup+-kfZPi(d_gza_dl1 zM-FBCDhaYMjr=-wn5;)n$RJu>mPW#zo%yPWFOY`FS4F)UX{6&>gnH)C3b`IrN|8Vn z=?gd|9C3rWmV@7kRx8rT(!+9>*-Z@=;e;7MR+yMZ9!pxg7I&fiT(mYf-NgsZDG=M` z>0b-jdFf8R3Fm>O=_-MX(;Yr#0fxc}OlwKP%t-I&`)(GDqoK*^^C=A{+j^|29}-9{ zNbljN+@L}XdPO>1zA3n+!{sGo)4P!x3zXs_F4E-k_mk4QDB~0I=GjhPnS6U(dMDpx z%ri+AOiQ0d85WU^EX%YgA;zS4@LXI>Jd>{%r?>OvdJNXPX9be;)Ah)`1%eN1=s_-D z{!Pc4%a=pv&*jU@-E;Z!GS^(byeLmDUtX9mmoG2P$>qz#*~RBAO4st^^(Z97#CTfN zDM_Enw`@?*#YXm&fSZ-B;U@*)axG;NY2*9GXdZV0$}1u|nVqiUw?pKCm4K7dr}Jq~GfEf} zW$CzdC7<@Du^Kti#mL|i4!Su=a5g+ug(#k#Udy`|#pO<2u54#v8W(A7MmtjMD9_^$ zAgfGBui>fD%p{g|>*kAYk1ty>|B~a%R?MqJOO-L%8KtMB&oL8WOEnE`gD(iw7N#3` zQshCmFxnJf7FFh@PvzHIxN}y_1dqhk9H+q&S91vW#MPqqs=SG-ImEFiuIBK^l($Rudygb_jpv4RN57i5i9C?i?o ze#-7ZFIQ!dit)JFU5?YJbgP-(-)xLs8KUJa+h8_#u5Q>M^BKzNe1wS8re{gSbRWM-u z+<3;g-RLw{8B<_!|83y;z-I$T0-b@ifsy_{_@DM4@%JG+|LoW|Vz|_s;cLTjNa@o&p)`XOkd&`EpG)Zqf$6Lacsfc6>jtSxuxS~vc$A)>Y~F%V zr!Sc&8k&~@o5xfTO+D}f5-@c6k`-?!fEQ)J`jK>hbbFrcU}^@;;9S%Zx>QO&ZO33n z25cdzDFlFNzIFi3$$$;CZUu1B2+IyjGut>3x!f+lNUDb=WLah_PurUXDF@YoLTP4; zYr9f$EZd!v+3Y}8e3+$0mZ0-8n>hKiP;e!GS*FevUx$8KHa$19k;_0?P1}^s5_VC> zAPVh7^-wZU%fM3C4~(Si_cd1zK*CK}g(LeWh{2K*_>rM4Pwx@_4+Sq3~S<$^XV zxazDW^x6!#*8LW=WD1X_z?Lnn%7C4v3n07fL5%J1S-i)o1e2@)#5{CwlkvCXKLYHSIa@MrcilGo%>>ak0TmsecnIbL@ z^6k}jUDTw^1d_9CXC&NMpm*k+;n>W0o}n7@lKuVo%sAeja5j)NMrX$2GzD)+$?E0( zQAL?CkRNgV7$q1tle>#P7G*~B<0{uw;9_3jcpZ;?d4a>Zn-_T5jKR$d9OyXA z3%tTHnioXt$825@I1g%G;6O(+FL0=1FfVYx<1;S^6b3ghaHwN5FL2nSn-@4-9`gb( zoSK19Pka%M`o4Z!rV}xHXJ;S`(@AL0hCz`eP%F}wD3|QS%qg6urJJR*3rjMq`Tj(O z<`DsvIxc%+*%o9Yvonx~skb_Jza?fAf=fl{J?`Vgsazw?6owr zSVA%`!9EHO@=wn!;@t}W4e8Y(8!OH%czS*e46&V@GNhicbm5qHv$H|_xL>Fk;WH|$-zazvBB-(2Lpc% 
z{514n=(s6MhLGCkr6pBFh#{ge8N`c>qbM0P#T-#kC`JmY!5bED@#;Hkiafg1xC z1v>QK>;D

YEsP9g_Yx11tT%@jvUo*nh5nt2WZV*gq!pqVF%h*L+{{eb{%EQRhqf z)&##9yffHu-08i+`=`)a9ODx+(Y=HalcmVeO%k`^B9*K z?fM*Jw^41Z@ZPOmV@xu%z@Kn4;S1V1c@z!owb!dmwfu1%KF3Ftdr&&hDivC^T-&u8 z0OYh>Nba4ksRm2MWf^viAWH)UX2*65kn#bRqDW|rxnPC~qui{#e~ezL%I3MMS@{o^ zW7}$!qD>ulV=4EsJRLR&yCT)il&btqD^^%vI5@0&A^5OTf;(5I05!D82D@-?E`|f-ozf^4~1GR8|SE^c$8&uDhuR z)2^Z)sd%nsnRU$ULhk>BQWuUc@p?W&1HV zs~57A&-~Uj<&!MeR9&7kUtJZr3%R*iL*|83(jmH89blu=6UUVRba5neWNcP5jJkB~ zBrc!8p(B#Xi^%Y1HO=~@OXM2K*@d|q+M0bgqpVeIRlH=fzDmvGW?s9AtVd|vXkn^_{8V^E%{ zXR%cHTyLm0uxxPDvcoICb_<)Pm_2zFck0gNdZ(%${oZ(H8n!NdU(2RcHfO4?VR>)1 z*lxg5uI4xl;=iOuSW>8?c`d17cISk!HY+c(!QPT+P1basfuz^*)_MwrXQNZQeG2Xp zP*F&C2=;G@_q2cnm{7d5o=-4a5;;5Dte%QYu7+yvA4oj676fYVj)Mg8+)M4>waJUS z?DGH||6THH7~;IQYiSMJ zO4y3Xa%59u{Ma(DWrMix( z;&}uRb+zLVwA~D-+{v~QaXgkw8;OXWIilRb_D?I!<%n`SdtYs+Z zjvh>GB&=(T_a<4KW;t>{^UqM9nW?W=Rl1%7uGOGhmC)93>XG+O?M{|QQ+82%7N4nS z?Vx~>?76sOu3pdgEOLH9i}|9{YqzpyNJrd>%QIS$RF;QkG*)h_E3I|*S$gIqeFZg} zVhdnvZ@i~pgaegsV0lO#Wi;&TLv=`C1qWQXf5lnuO&UFnu{xerFiYrygWf(IWwWbF z^*Ye17is*#n&5Uf;g=8cwc=Lh?gY7G%~n4*SKsOi*8=yno_-tTV=MIwj%7D8ZSU58 zblNz5fr4S_h$Chj`%ep^bMQ@z#^`18H9i>5^qs8>qxv+wtgc*FSB=L75q%0CoA$@S zbjK?qmnc3DzsE#QtR5L7c0{?0<@u9Emej<<#EvK*V+r~!0B5Aw5#^&SVO?&yGg$11 z@)3UYV}E0t7A}eUG^?3sJC|X2A5uTXRv4Wkvg*T(BeO9LHt>fS!lEoYHt<90gX~a} z2oJ{dvW~&om_b9xil%2Rw-=P3vCA%Gb6H*x%9AV^uQ@t!WKh1qT0nLJakaGaI0q4L z<|+5{{lon}R^=YX>&Xs9_#9pFeMGsN9Z#|_H(KzB@^N-rND%S;l9N{yBp*jHUn+QN z%D3(!f5(Nix$BO7pkt4U{!sZo<71AxCq&PI)}1Ll)v@f77_mytB~pPaHP@S5Sc7g*Srg$Ubp#UpZ}7-Xd zTb0{l)0Gj{>a9I1XTCs+d(%VP9=|(SxgRfnYSkz`GqzEQSlw}OUGdN1?2|ML$O#{^gex>9?|}xeH(Y; z-KJflb!t1ccWEWQJABvsF7b8x&h@SLmH9?{|EMY6SG_MFQaGkBU--4) zw}M~s?+ky@_g46Uz??uNbZh8Kp?8PQ5BowpLRF#SP|W|wz%BkCgcRH}@VMt2`tsmU zL%$4OA8bWFz~W%cKgVAb^!k??e?)eHPa9Y3y8`{buLeE`B3x^NPW{ABS$Mh(2F}*eDECq1k)>&AJc@?qZxp_2fd?6Wb}iBeC`M46UX}{!{S>HY z(wg6L4)wxql&VT7PHrVcQwI}PxIxm9{^A2fpkzC))Q;N{FT9^3z1^jy5tvYKq*9wl)(td!q1%MKWLd_pCm_Vb!)J;G6*T>OiQ)jie6%(fT@VP84TG1!yUU11Nf=&w=z6UV{9xAx 
zyoYrRk%Mm11K6d6UA8`KfVKjsGAu(enE5qfY1$~IuMso8#QT_URC`tl_Y90_Oef9*L_KaRh-au>abd~2BDE=Qyx#ab7@<(;Vzxu&Y|RFOmEdd zRI;A~g#RuY0&_(xFWVo!M7w}lvWKd|L`iDJcIX#acIPK30mfPNGKR!b2c1Dx&DnD{ z#lIhDLUzC;m)Jf`6HPcLs70YKu(*5k>UD!^n!hK`)N0kR1{vYR~lEP{*)!EmC7BuR9-)Fxo&9Ksfe8C{rg; zW~J?TgRGT%*+uSISF%X!Qp-5`yzX$-@wf{l-@Sw2E^>6p|!so!F!u^d(t2Wk+PVq)QZox-3c z!lb}*s;<;$X@K)7w$x{6vh{X@Lg>`g1z>SuN~=#$M7N8alO@2YkI}fhz&W%*>gOnJ zhwZZZ!<1u4&8_dCXmTOHweRDNcA ztXrpifgUaC`*Bt($;hQ4y78`|J#g+5BBEEa>^nP&}S=&`8V;_rM z6x$K2K-T|XMxTt{6@72CJGuo@x-ar_1n37Nj!P+&)YDDeJ3 zZ=fa+@xSH&w*L`;7NmZ=-jC{!jgRSn_Yw&(qiGBed7G zr?l&}1})(EvFAqc2w(JU_mrxCKB=PUzhUE^_f!nPGrSh=lX>FVM21O3u03{V>85AC zHgABu!%crnItpEmnGwRlrD6ci;nce0{G#K-d926^pm9X0qzX8r<_^G}TRd6QZ~v?mg5(E$A1$cfGr@0&VOZScurTw^*U|< zVFg9hW>ZB=PfC^etQtTZ0gJ{Imm0}%3_fA^w&PYel0I^Ko;!fB0*crXA1p&Ga$%

JV=gpFM5>aRpX>6jc^189)d@J*t{sj&fbO&39RA`~U(8C@N0qCvOrR z%^N@<0fndyqO81G08bx4Tmg+&j^@$^f)E1&$@T3Kt&|NQ)__*ipvD>jJ$3*w2$W!f ztGaF!?~4Wye^88w1xxR>Sd@()7)5ao0iV!;$&!f!Bl+fP(}!Its!bah!G|SVIVK?{ z4#aqMo2E<_P8f)yS9}*gw8;lev|Kb0F;57ohLmL!2O!1Les9YuCktl|AP#``d$*fT zBvjErkT*~K16SCM5nYcPF!)U3WvVQgJ`ms|m7~gQ5Ov27z`K7EdKY@VW>GSAz{efz zu`cXNJ_%4f;N>3Ixf(wSGjTvSscEVIWZ|>{jSoh)E>J>D8SwDfV5eeGmd_nf`HIZ1 zC6utU2NcfJSV~;lI4sXO@!`dpOZazLfYOIfz)a3u%p;5(whr0X)XYU@&}Nobp{PGC zb0Poq9RETAF)4ExfjJl$dje3|!K}<7&g!@(v6NtwGX1;}dqzuHIxEx18<9b}a5%&% z!LN*!V8xk(oE|nX+(1Dxn}nH?Ilu>sdEMvIbW*05BZAx(@eUBpQM)LU;*+~I!IGca zTQ15Z%`$lxkL-1BriTx|%}**nqlCAbcQlPl9Z@EltTJ z__xxm;6RSA8K3FmODbp9OvrTd?L*$|BYPg7>EM-wN2Dy7mD$fnsp$ZQrm4BTgOuwz zZBNad&!?O%qS=AgtV}y^$EJ1$Q(&sh_lS8nE7O((X6er*SV?9d7bff)b4!^n;bvv_ z^5*PtQnN0>re@;iX5&<|OMoevR!(%M&Rv#I$h2_MvI$5ZE{lTETa}H09s`#5&mxBZ zapNZAj!0kRrO01n+oG>VAB(;>wlX#?`j^Ok;oGC%j9eDIGxA#W{OHEWwn$lIMEHNA zq43Yb&xY?s?!B(?w(zp>tnjFCAaGsi_n{w$J|DUX83n3CQ$xYv--53rf8bYeU*CHm2o2-YX5;(3(vr3fB{{XepYg&bvj06d^P!Q9?modCRNFz;l$(5xa*q1a>h zE7`3!vVKgc$l0UynEnL#`tCMp9gGQ!px`NZ=YN#+E=Z1xkCOoT;j+5q93)s?b=s9ztc&orFw4{UDa zsNK}t41KJ^-6=Ssq=&?<$`R%zEexK?pz=Cf!tRoi+RmZ{Y=zQ$XSaGzc14@)q_**) zwK-qt>I6d{t&(qtrq9%?)j=IGCMxgdLuhx!7?EJ%C(^gUbjBFb#iHKq_9-5tGkK!E z470%QPa&#w^2uN?axrPC-(qXj(L%20#yNbaFxPd%In4Xs#gUaa_C2_P z=qo(#o*v4t)F)V}U6i$nyy`wS_V!Xs&{g+FA6BA>vXyQaqsPR?DgpA+V-q$LcEp=89WUEja<#qc`&nF> zi*8SS4@VZh5g?TRuUP3*VvohTqJN6^MVCPKKQH`X zcwXrC(Bk0df;)}x89M{t2%P1A)xY2O6JM8ar1uuY5r04*tzE16Jy&=_>NQX__MxHU z_>&G52Nl($Wc-NR@F-VRR5alKTevI7^fTx_-RX1sRxY30P}knllSn4^rlz0Op6qSv zm|maiZEa69EMB;1RYNs&G)O$#o{Trt)t}yrTP7N+dV45lwc+&kE~{=uRXqaCx+~1W zv*3A0{+0;D+ZRtI^|U^nVUzCv*I-kJ;5FRZR&jb94y|#C#)Z>37VZBUEKHgRA{?H| zd*7G0_lDL)swEi>C0-_n*AZs}mjD*?6BpF1eJQHz_Hf)+Zlr0=(mo--Zy`_Re@sD`sc= z-u8G;P1oKyrfQ2tg|uf9N2B-Tks*r)Zb(c#=}2ITRMd2KCwfv_a5qPLvJESrg;0vg zcwrI8A^%c%$K$aszPG8jgHq#n_olWqrP{3NaM1+b`$?RQX782BWW2MvqaVYNkhAg7 zc-}pZf;@ZUJN|eY0KWTFv76at-+RXKzDrL;-?S4@PhwOPV|nA7PekKpqgh+b&i1a> 
z#6jHa(v#@M#%_#ZJ!Z5M&|`(!_PTg-e=5d7cWUgR=L3ZD1HOf0ro;*TieZK8P<=@{J;V3LPE)*oI zR{=Jn3Y-xQ^69-@tw_*evTTCDePP}~({Xix_Kuxdq0Y@+x(3p(=y`vL_q_h(_H0h- zK#=v8QSFfT5!27@N+bMcM?59zJsg&VV&bw3-{gQZKWBKof*c^0CA)R8wKq8ODYyM z*y1Xzi*Z{!I!x#bE4%t}iLmFO#yek+6tZup^L6p&-hIc|HG>r$*LZlxIHHc5cAMe{ zQ&^)Age%TOrkHA#jlv~wqovc4tf!^FswtW3h?D$3QTehG>y5q+tNsI#h2f8c=i<)4 zVDQtynef`58o0s#3;#CX$Gm^_Zq~2Ueu1dVE$ZFs3{)B#f6h6D6Sd4f)pyoxTDN&; zy?u#RZ*P$2RKtO#kYAT9Tml_}wSwy7JqOxb;&tTiEP2+nXEksCgztM|2|G0v?numD z#gNFXv47;26R0k+sNfU`%pI?CFBB~{Qtga4n*tmwv8c6fB}epxYf>bF4O|aos(}u$ z)50X$^fQ{85nDK&SW){HZm&^MNjJb%aL5X?A}C2rv-KIZE7)fYoHX^6{2Z}w3vq@Y zBH8pl<}a{Sx4x!yIUf|nGo9E|NPdOepqzocNcK_K3hf=-v)JHNY=|mh3T4Q z9Mk-g_lZ6)2dDcWIJ;7M&Qgwl<%y=W*eoQ)k8R#?=E)V+(j_N-Qdr26UXbZ$E8N&k z;`eiktw~X~m}8os<^I=BiovlRv4~?k@c9Kf`#oC&k2Z5EkRTTjnj$Wf{~ z(I~ymS!v?uIxT|>IIj7{_~RVxw;}OJ!cC~)$P}Mw!pX66+c>w!n>uNKw4Jx&m0T2U za&a>qD&@2}X{?xYU`+PcoSWH}!V70}GUT^QoKP}2d2u8tD&e#! z{TIy>(@<&Kh=l19O7Ql5UE5*Ml2ar-aTdp}q+r;Y8euo&jF}v{{FZ0rdT&Zr1H5m@j4mcQEfjPDfh)82ahclrVC4ec*rjr`2SaiHP;=|Hf;>Q*p$ z&D(g#%iiht*KFL_-nGAijXCMYZTj)Lr$BzT>>W=i(fPKx$RlMf zE4_|)pI_;mfp>5c=kexd%wMGT+!i;z z&#YC{v;L%yVikNjld#Q6k{&pN_nF^7`i_?lEty0zUQc%u$bQqFYTj{vv(7v2*t9{$ zrMHCX0w|HVcpb-~^W?2qfrG1~S$`Gp|0M1hTojAz~^e9<1c(RR6U zN%9^tkMec$@+Oz>PrZwyaI*Osr{`nm8s2N|$(xWg10Y zw)npGB<$Cf``~)LvpvsGg>L(Ya-o1IDe=k%sz zd&@RTA6rSmF2~eO{_50e*<{=c@;9|C?%L<^bb!{BPf~05t7A8-Dv_ohO1wdvAOORQ zinmQe#gtlg5wExofkLTxmF4AA+0|Mb@2AL(LE9Uo9_!|PQ_xL>ccZF$6?}VA@%nhj zUP2_^YU^;&ZU(7G-OQVdMR&Jal^zMAqMLQqXH@RkxV^EyX8oqh?K`$rH&$-kyuN1J z_6>D|cO&o69;5}2a&>7j`dgu>yVIr44$(_v<)+GwySLX=)i>_kysfr=OJ!AcVFx_aAFxtmaw8~ z3i-vPt?cNihffJPR=T`GwU>-?961K?q!tB?Stsie;CCCDhK#k$Z=0hP7>=6 z2RZ_4WN$7-XeMry!(mGFTGQ2r^vUaxtcS&1Wjz9Xov{S|PX>m7kVN^k;0fe*I@I)q~sQs=uFU+Kc$oh{Y7wP5Ob)yvpdz# z{z5bN8e8EvU~JyZus5ww8|jvS4NXbhl5~1J-c{WfZ|Q~4`EHB?k8;ZofF?g`l*8^X zMtT#H8DS~6w$qGk>d1No_y>(;FuluJ%=@zUV|H(Ug=#k$B`}g3GiNfyx8ZKi1966y z?I*ylHm1R1E$CR;+R9+WzW}(xsDPcB&ly#%K|1f&I7Q`DVhJ6HXFnUMON^;71oH+} zRSbyu*GOGxl*6Kn_AoB0YWs4C117NX< 
zr7>v|s6yBwf|X_s`^QLi8dG6M%-`90##|Ws_?%MJb?r#d4dJyP(T+X+&ZktPe=@9Y z`8wKdOoiDie@B~)X)r}$-7zAON|W^=f28XClM$$yule=HR0K}uZ+;b8wwk9MhBYsL zq*nSDBI?jsuz-;%-im8mFX*Ma_8L2zQiu=Ceqp5Md*>lk5Ts!>s@mc$`)l@^%iQ^7 zq-J>MBdm`v09D2HFz`QXZ9IUx9}obT^=PD~c*i07&KNtEwcL(?Ec(w#P4rGiK%5|f znK#1x$4HI$&OxXe-%3;!dPz$3$+vySgOM8Toq(`1zW7Z@24SRP-U*2Kaw`dWOA(J{ zl#~D`kIL)}rT}ICZS*Q1Hcmrqm9cg$LtY5|_1*Cn?u3msPgKuaVD#=V<|9^05K98K z^t5-UNMl3S7RBrNqtRPpEJQey7&sgPyreQ&&n{NUd13TUH8vs+$*8S$U?{~w9;2gyD-oo^tExD&`Pfw@y8CaZ~WUny~(IH}0ih7kB?KSw5k=$!6Mi2+*D6BpDmppxcSHm9M3zT%Wc$Ab(*GIZitspi2D}O# zz(b+;g;L1(zbG^^_~qc0!9Brp<1fayjeCtNj3(nWqbTr`*f#?o4vfdWewDtLeRulm zy+86^ijIc=3{zm30>c#ex1+#{Gqw4W!E8`cT;>p)mwq7W@IhoqZuEQ9}f1$QAtsg>%1=m)t z?FVt+iMDd(K{W8T6ptkbz&n{r4RDvlI1%4y`)M3j9yO-*3Koo%W1P_z{{rA)qa;Yq1(rQQOFvPzAxWSm!!`cBa=TW!Rt|#7?aI*b8tX$`xeF31D zw5aBn77y*x0 ze$+wMqjC65V`V3xnP6pM1Y)HUDJWv?bsR5^!!wPI9jJL?`1Qk6jH&z4T0Wlou-_;- zALS(_jHj;e?b=+A5g_#JzYqKT6Wak+RK&!nZJpbD;&J}Bap;hDdK=1^s#8_#_Crk! zl}=LrFb?&2C+q_R(=-zVirHVrq4T{H_F5%XZHQE5e;J1wy%Xa0lFbLZs@m|EacG-& zLaSA>J<;3J#{M!6ReQ&^pu|*okj{txGY*~N9oKBVuY*31{xc3O^p0z?-jn1={~3o$ zytB^3d$w3e)7IS&y&cSLa8QyzjYAP{X(OPHrpoA3?l#t(3&2xOVL~>vb^8#cU@n}F zep-avw$$Z$ZS*Nu8WrcDI@3K{Q;9So_E{vJjlNTjg$;l=RZuyKi<}om-%?}s9@JR1 zij6KeXaOazn1im8`f-12^pzW{&$jAvD^+$~_qRsh7-QpZ)IBj%y>Fy3br)L8H`V*R zM#)Z;n^PS#qqh%;(zn@v_v!vpxX&JSSPv>kjHPgR&E9mNIK^G09rmHIy)q7-Z!Ct( z>TwDF2hTI+!@pE)RAR5;ds)`jmNX8|G3LW5R6u9#V_A>J!D+@uxObkQF*rEMm+YNjy9b*8SlN*V-$T!AI`eBVVRq28mSP^-PxNWTT%s5CN#{LF+UQV zjz2RN^axk6)eSSMRyT+=4m?d?gYZRZ=!RZB*4-0)Kf?7LZKxRg>icO@~!I9eDGDHw!>Qr_CQfommf;(Fmw17NML1kB0 z0=J1l36>l>NRi=dNFr9qne7<1YSQDvStQ=d{6vP%(7^!O2^--7cvlY@Vv#W(#{%5G z!B!L$ExIzf7nTq#x5DB(Cf7;w#N~XHqW=5v(BZw;*ly z5W$8yk8H=p9x8*j9d12LO6g%(Z6rrm@XSMmTs*dAXiRTLpCPs22qVgYR5g+VAV zR|hNGOySrxhD;n6f%fdT`P>(Hu52l+_JC+IB-k=SWLq4UDJI{Jt{w7!Sr@K!J0Zq< zLIrV@y$D?BFYxRTdEF7iRp@=SKGCutmkbt|AV}DcTtl!q7aord@g@WgTb>?;#|Sot zzCzDf43L;9LuH@@J_S5^dOOMgr=W{Rpl}uo!Yw2!o15v93hwq(xG{kzm+7Kafdot4 zqz5SU`mv!jBw4s4dT|Cv) 
zN}eu5Vp0zYZr3)4$SG_{7+@%a8M$#J!G{xPQ_$hEP~pFL+OMI36ZcanBzgE1%*Ht~R5b*MR=`cj9JZmc+D&A5PN71wgJRCqAPq3W z%H(7|o`Hh=KmWvf@aMzId_Oc4NcoU76Q+ye?ZRDEg_~Ydty|U6 z)Q-EbhXN)&{oSd=zMiJ;w*HFBj(rKJo!f>WYE{WXn$@g|cF%nU^-ywRRJb8>;xQzITn9?Yio#A|hD?FkMTP^?!uNJG^$ZOcuI+8` zrekM#Ra;Y6SG?nG9Y~P>l@nzBw-74eQj$cWJEA(6FJ=KZUyCICayW_=rK@&_5K2Q~ ztT=Io#>OWnkhspT;69SJNyLyF8k2{ivL%ryB->}B4DO*Vq>TfSe6S#xc}ohor0i&H zOtch~DL)I>AjG%#b{8~HL18%;>Ydzbccj`o3hNUmZUQab+$^mAih3`w`#+(;4a5bv zgAyuE?P6i)#jbUU7O3YPY4!GA*s6EQ#qJ=hpk=o*d!avc^91)V467D_prvWd|e zZ|>b!W+hBx&#Su=JuID?^?ol*`!E*m>sq~R(PHa$qC170ONZj+wpYEW)rscw8yZ;N zlALU5Ha(>2+Ld7JkJSLbW^Q%7FK)e~ODljq-BDrdsyDTFWuiCL-J7x?TXDBnPbcoU zp=2($59?^^+6Tdk0*zO%tXL>3K)nP-d}l>VQob4ld%Crn62l=c+kDkG=mZH@{CHxZb24p#CO&mmCJeYA)6mQkj(BPIC zx}gErD!sk&p202ysitwYNDc}O=lmNy^{0ba!#V%Dv*uFQaL&Ks zoPSXIJLd&$yV?9`k6>L+e!^aRj=kZWe_2B#^ZX?dDkE|u4yhDmlw@Q$=O1K|>{TU) z3oIaA7|F874Uu(5#>nBEe>p<2ncT@*X~~M=oPW7fyKt&Lk#Vo^7z_zFvqI(kvrh@g z{|k8LeP@Qf;hcY#I>1SpAz`LQa;4#%f2kJxOy;vVi!invYK7Z$vTz#C`8QNX#fHal z&cAq`4u*!q=y1-zEE#_|=O1%UDxgw>5{8j~ z)Nsx}y8Dc%F`V-+pIV2gGo15}OF2Y~x1aN`%a-pC^8doR_LiPRGO-u4@Axn5BUEI( zT;XIajs_2P8qWD=ZYFR)9V)5hhI9U9TNr4@Vt_=By`eHt0-pjd<-#X-kohHsbN=xI z$Ch?$IOm_uQ_Mz};hcYN3Z!fT4d?tj3YCU){^i)>ZF6Wi=U=YL!=8R{IOkvP>@lka4pb9E Date: Fri, 15 Jan 2021 17:24:29 -0800 Subject: [PATCH 024/175] bug fixes --- .../installer/scripts/tomlparser-prom-customconfig.rb | 4 ++-- kubernetes/linux/main.sh | 9 +++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 48a3ce9f7..54c2e23bd 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -195,7 +195,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) setRsPromDefaults puts "****************End Prometheus Config Processing********************" end - elsif controller.casecmp(@daemonset) == 0 && 
containerType.casecmp(@promSideCar) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? + elsif controller.casecmp(@daemonset) == 0 && !containerType.nil? && containerType.casecmp(@promSideCar) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? #Get prometheus sidecar custom config settings for monitor kubernetes pods begin interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] @@ -254,7 +254,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Close file after writing all environment variables file.close - puts "config::Successfully created telemetry file for replicaset" + puts "config::Successfully created telemetry file for prometheus sidecar" end else ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for prometheus side car, using defaults, please use right types for all settings") diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 7dfe6259e..148bbc0f7 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -204,6 +204,14 @@ fi #If config parsing was successful, a copy of the conf file with replaced custom settings file is created if [ ! -e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ] && [ -e "/opt/telegraf-test-prom-side-car.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test-prom-side-car.conf -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test-prom-side-car.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + else if [ -e "/opt/telegraf-test.conf" ]; then echo "****************Start Telegraf in Test Mode**************************" /opt/telegraf --config /opt/telegraf-test.conf -test @@ -212,6 +220,7 @@ if [ ! 
-e "/etc/config/kube.conf" ]; then fi echo "****************End Telegraf Run in Test Mode**************************" fi + fi else if [ -e "/opt/telegraf-test-rs.conf" ]; then echo "****************Start Telegraf in Test Mode**************************" From f5d297b3d2db14d63eaa2440e9ca238db6246ee9 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 19 Jan 2021 19:08:51 -0800 Subject: [PATCH 025/175] new changes --- build/linux/installer/conf/telegraf-prom-side-car.conf | 6 +++--- .../linux/installer/scripts/tomlparser-prom-customconfig.rb | 2 +- kubernetes/linux/setup.sh | 3 ++- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 87f20e6ab..e347340d8 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -73,11 +73,11 @@ ## Logging configuration: ## Run telegraf with debug log messages. - debug = false + debug = true ## Run telegraf in quiet mode (error log messages only). - quiet = true + quiet = false ## Specify the log file name. The empty string means to log to stderr. - logfile = "" + logfile = "/opt/new-telegraf-logs.txt" ## Override default hostname, if empty use os.Hostname() #hostname = "placeholder_hostname" ## If set to true, do no set the "host" tag in the telegraf agent. diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 54c2e23bd..b8e2cc15f 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -209,7 +209,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) checkForTypeArray(fieldPass, String) && checkForTypeArray(fieldDrop, String) && (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? 
&& (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby - puts "config::Successfully passed typecheck for config settings for replicaset" + puts "config::Successfully passed typecheck for config settings for prometheus side car" #if setting is nil assign default values interval = (interval.nil?) ? @defaultSidecarInterval : interval fieldPass = (fieldPass.nil?) ? @defaultSidecarFieldPass : fieldPass diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 352be06d7..eae6b17d8 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -60,7 +60,8 @@ sudo apt-get install libcap2-bin -y #service telegraf stop -wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf +#wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf +wget https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-1/telegraf chmod 777 /opt/telegraf From 490e25e2f4b33b3ca10fadd092bbde7790c54594 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 20 Jan 2021 10:49:05 -0800 Subject: [PATCH 026/175] fixes --- .../installer/conf/telegraf-prom-side-car.conf | 2 +- kubernetes/omsagent.yaml | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index e347340d8..55c201458 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -748,7 +748,7 @@ metric_version = 2 url_tag = "scrapeUrl" - + monitor_kubernetes_pods_version = 2 ## Kubernetes config file to create client from. 
# kube_config = "/path/to/kubernetes.config" diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index ab307e010..9c7c9d866 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -507,14 +507,14 @@ spec: - mountPath: /etc/config/settings/adx name: omsagent-adx-secret readOnly: true - #livenessProbe: - #exec: - #command: - #- /bin/bash - #- -c - #- /opt/livenessprobe.sh - #initialDelaySeconds: 60 - #periodSeconds: 60 + livenessProbe: + exec: + command: + - /bin/bash + - -c + - /opt/livenessprobe.sh + initialDelaySeconds: 60 + periodSeconds: 60 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: From 6e6d58715163778acb3a215d16835cec3b09f217 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 20 Jan 2021 11:42:34 -0800 Subject: [PATCH 027/175] adding pods version to default setting --- build/linux/installer/scripts/tomlparser-prom-customconfig.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index b8e2cc15f..787f8069c 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -89,6 +89,7 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu pluginConfigsWithNamespaces += "\n[[inputs.prometheus]] interval = \"#{interval}\" monitor_kubernetes_pods = true + monitor_kubernetes_pods_version = 2 monitor_kubernetes_pods_namespace = \"#{namespace}\" fieldpass = #{fieldPassSetting} fielddrop = #{fieldDropSetting} From 064e621a93bfa816f4fd793d22d425c9bba6a4ed Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Wed, 20 Jan 2021 12:40:06 -0800 Subject: [PATCH 028/175] add label and field selectors in config map for custom prom scraping (#493) --- .../conf/telegraf-prom-side-car.conf | 4 +++- .../scripts/tomlparser-prom-customconfig.rb | 22 ++++++++++++++----- 
kubernetes/container-azm-ms-agentconfig.yaml | 11 ++++++++++ 3 files changed, 31 insertions(+), 6 deletions(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 55c201458..9b2931e94 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -742,13 +742,15 @@ ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation $AZMON_SIDECAR_PROM_MONITOR_PODS + monitor_kubernetes_pods_version = 2 + kubernetes_label_selector = $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR + kubernetes_field_selector = $AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR fieldpass = $AZMON_SIDECAR_PROM_FIELDPASS fielddrop = $AZMON_SIDECAR_PROM_FIELDDROP metric_version = 2 url_tag = "scrapeUrl" - monitor_kubernetes_pods_version = 2 ## Kubernetes config file to create client from. 
# kube_config = "/path/to/kubernetes.config" diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 787f8069c..e638d0bb4 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -23,6 +23,8 @@ @defaultSidecarFieldPass = [] @defaultSidecarFieldDrop = [] @defaultSidecarMonitorPods = false +@defaultSidecarLabelSelectors = "" +@defaultSidecarFieldSelectors = "" #Configurations to be used for the auto-generated input prometheus plugins for namespace filtering @metricVersion = 2 @@ -67,17 +69,19 @@ def checkForType(variable, varType) end end -def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) +def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) begin new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = #{kubernetesLabelSelectors}")) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = #{kubernetesFieldSelectors}")) rescue => errorStr puts "Exception while replacing default pod monitor settings for sidecar: #{errorStr}" end return new_contents end -def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) +def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) begin new_contents = 
new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_MONITOR_PODS") pluginConfigsWithNamespaces = "" @@ -91,6 +95,8 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu monitor_kubernetes_pods = true monitor_kubernetes_pods_version = 2 monitor_kubernetes_pods_namespace = \"#{namespace}\" + kubernetes_label_selector = #{kubernetesLabelSelectors} + kubernetes_field_selector = #{kubernetesFieldSelectors} fieldpass = #{fieldPassSetting} fielddrop = #{fieldDropSetting} metric_version = #{@metricVersion} @@ -106,7 +112,7 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu return new_contents rescue => errorStr puts "Exception while creating prometheus input plugins to filter namespaces in sidecar: #{errorStr}, using defaults" - replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) + replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) end end @@ -204,6 +210,8 @@ def populateSettingValuesFromConfigMap(parsedConfig) fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + kubernetesLabelSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_label_selector] + kubernetesFieldSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_field_selector] # Check for the right datattypes to enforce right setting values if checkForType(interval, String) && @@ -216,6 +224,8 @@ def populateSettingValuesFromConfigMap(parsedConfig) fieldPass = (fieldPass.nil?) ? 
@defaultSidecarFieldPass : fieldPass fieldDrop = (fieldDrop.nil?) ? @defaultSidecarFieldDrop : fieldDrop monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultSidecarMonitorPods : monitorKubernetesPods + kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? @defaultSidecarLabelSelectors : kubernetesLabelSelectors + kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? @defaultSidecarFieldSelectors : kubernetesFieldSelectors file_name = "/opt/telegraf-test-prom-side-car.conf" # Copy the telegraf config file to a temp file to run telegraf in test mode with this config @@ -234,10 +244,10 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - # - to use defaults in case of nil settings if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? && checkForTypeArray(monitorKubernetesPodsNamespaces, String) - new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) + new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length else - new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) monitorKubernetesPodsNamespacesLength = 0 end @@ -252,6 +262,8 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export TELEMETRY_SIDECAR_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") 
file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectors.length}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectors.length}\"\n") # Close file after writing all environment variables file.close diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index aec1bb456..e38d9b4ab 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -76,6 +76,17 @@ data: ## ex: monitor_kubernetes_pods_namespaces = ["default1", "default2", "default3"] # monitor_kubernetes_pods_namespaces = ["default1"] + ## Label selector to target pods which have the specified label + ## This will take effect when monitor_kubernetes_pods is set to true + ## Reference the docs at https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + # kubernetes_label_selector = "env=dev,app=nginx" + + ## Field selector to target pods which have the specified field + ## This will take effect when monitor_kubernetes_pods is set to true + ## Reference the docs at https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ + ## eg. To scrape pods on a specific node + # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + [prometheus_data_collection_settings.node] # Node level scrape endpoint(s). These metrics will be scraped from agent's DaemonSet running in every node in the cluster # Any errors related to prometheus scraping can be found in the KubeMonAgentEvents table in the Log Analytics workspace that the cluster is sending data to. 
From b4e4eb203910ea0c4f2a1475457333cb0d3990b8 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Wed, 20 Jan 2021 13:08:49 -0800 Subject: [PATCH 029/175] fix label/field selector defaults --- build/linux/installer/scripts/tomlparser-prom-customconfig.rb | 4 ++-- kubernetes/linux/defaultpromenvvariables-sidecar | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index e638d0bb4..d47259e5c 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -73,8 +73,8 @@ def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubern begin new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = #{kubernetesLabelSelectors}")) - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = #{kubernetesFieldSelectors}")) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", kubernetesLabelSelectors) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", kubernetesFieldSelectors) rescue => errorStr puts "Exception while replacing default pod monitor settings for sidecar: #{errorStr}" end diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar index 71f711e19..cc72b80d2 100644 --- a/kubernetes/linux/defaultpromenvvariables-sidecar +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -3,3 +3,5 @@ export AZMON_SIDECAR_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" export 
AZMON_SIDECAR_PROM_FIELDPASS="[]" export AZMON_SIDECAR_PROM_FIELDDROP="[]" export AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR="" +export AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR="" From 53184e437926d9778a25fae9c496aecd7bf74167 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 20 Jan 2021 15:08:04 -0800 Subject: [PATCH 030/175] adding logs --- build/linux/installer/scripts/tomlparser-prom-customconfig.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index d47259e5c..faa5db899 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -71,6 +71,7 @@ def checkForType(variable, varType) def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) begin + puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with no namespace filters" new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", kubernetesLabelSelectors) @@ -83,6 +84,7 @@ def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubern def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) begin + puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with namespace filters" new_contents = 
new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_MONITOR_PODS") pluginConfigsWithNamespaces = "" monitorKubernetesPodsNamespaces.each do |namespace| From 616d45345cddf0f37d954dd47d0beaf4b8f0aada Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 20 Jan 2021 15:36:30 -0800 Subject: [PATCH 031/175] adding more logs --- build/linux/installer/scripts/tomlparser-prom-customconfig.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index faa5db899..3ae83cbd1 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -85,6 +85,9 @@ def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubern def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) begin puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with namespace filters" + puts "kubernetesLabelSelectors: #{kubernetesLabelSelectors}" + puts "kubernetesFieldSelectors: #{kubernetesFieldSelectors}" + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_MONITOR_PODS") pluginConfigsWithNamespaces = "" monitorKubernetesPodsNamespaces.each do |namespace| From 2515a4d9fea7557990509fa0f8bec93cd4ef14b8 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 20 Jan 2021 16:46:27 -0800 Subject: [PATCH 032/175] bug fix for label and field selectors --- build/linux/installer/conf/telegraf-prom-side-car.conf | 4 ++-- 
.../installer/scripts/tomlparser-prom-customconfig.rb | 7 +++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 9b2931e94..c6be87066 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -743,8 +743,8 @@ ## - prometheus.io/port: If port is not 9102 use this annotation $AZMON_SIDECAR_PROM_MONITOR_PODS monitor_kubernetes_pods_version = 2 - kubernetes_label_selector = $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR - kubernetes_field_selector = $AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR + $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR fieldpass = $AZMON_SIDECAR_PROM_FIELDPASS fielddrop = $AZMON_SIDECAR_PROM_FIELDDROP diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 3ae83cbd1..5e5871a18 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -74,8 +74,8 @@ def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubern puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with no namespace filters" new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", kubernetesLabelSelectors) - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", kubernetesFieldSelectors) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = 
#{kubernetesLabelSelectors}")) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = #{kubernetesFieldSelectors}")) rescue => errorStr puts "Exception while replacing default pod monitor settings for sidecar: #{errorStr}" end @@ -89,6 +89,9 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu puts "kubernetesFieldSelectors: #{kubernetesFieldSelectors}" new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_MONITOR_PODS") + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR") + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR") + pluginConfigsWithNamespaces = "" monitorKubernetesPodsNamespaces.each do |namespace| if !namespace.nil? 
From dfa42647bef1bd653ffa30181eca6f3a55070563 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 20 Jan 2021 16:57:00 -0800 Subject: [PATCH 033/175] adding def vars --- build/linux/installer/scripts/tomlparser-prom-customconfig.rb | 2 -- kubernetes/linux/defaultpromenvvariables-sidecar | 4 ++-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 5e5871a18..9ed4ab94d 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -85,8 +85,6 @@ def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubern def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) begin puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with namespace filters" - puts "kubernetesLabelSelectors: #{kubernetesLabelSelectors}" - puts "kubernetesFieldSelectors: #{kubernetesFieldSelectors}" new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_MONITOR_PODS") new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR") diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar index cc72b80d2..7a2022e21 100644 --- a/kubernetes/linux/defaultpromenvvariables-sidecar +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -3,5 +3,5 @@ export AZMON_SIDECAR_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" export 
AZMON_SIDECAR_PROM_FIELDPASS="[]" export AZMON_SIDECAR_PROM_FIELDDROP="[]" export AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" -export AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR="" -export AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR="" +export AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = \"\"" +export AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = \"\"" From 8db8133a3c26b4269b997ac47bd8aa81325a983f Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 20 Jan 2021 17:21:41 -0800 Subject: [PATCH 034/175] bug fix --- .../installer/scripts/tomlparser-prom-customconfig.rb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 9ed4ab94d..4607413d3 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -74,8 +74,8 @@ def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubern puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with no namespace filters" new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = #{kubernetesLabelSelectors}")) - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = #{kubernetesFieldSelectors}")) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = \"#{kubernetesLabelSelectors}\"")) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", 
("kubernetes_field_selector = \"#{kubernetesFieldSelectors}\"")) rescue => errorStr puts "Exception while replacing default pod monitor settings for sidecar: #{errorStr}" end @@ -101,8 +101,8 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu monitor_kubernetes_pods = true monitor_kubernetes_pods_version = 2 monitor_kubernetes_pods_namespace = \"#{namespace}\" - kubernetes_label_selector = #{kubernetesLabelSelectors} - kubernetes_field_selector = #{kubernetesFieldSelectors} + kubernetes_label_selector = \"#{kubernetesLabelSelectors}\" + kubernetes_field_selector = \"#{kubernetesFieldSelectors}\" fieldpass = #{fieldPassSetting} fielddrop = #{fieldDropSetting} metric_version = #{@metricVersion} From 8c2351a6bc08983bdcde57296363ff06bdca78bb Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 20 Jan 2021 17:22:55 -0800 Subject: [PATCH 035/175] update telegraf build --- kubernetes/linux/setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index eae6b17d8..a28543668 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -61,7 +61,7 @@ sudo apt-get install libcap2-bin -y #service telegraf stop #wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf -wget https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-1/telegraf +wget https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-2/telegraf chmod 777 /opt/telegraf From 3ab6b18845fbf0099c8cceecfd13962c085b372e Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 20 Jan 2021 18:22:51 -0800 Subject: [PATCH 036/175] bug fix for empty array --- .../installer/scripts/tomlparser-prom-customconfig.rb | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb 
b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 4607413d3..04f638f63 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -250,8 +250,14 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - # - to use defaults in case of nil settings if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? && checkForTypeArray(monitorKubernetesPodsNamespaces, String) - new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) - monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + # Adding a check to see if an empty array is passed for kubernetes namespaces + if (monitorKubernetesPodsNamespaces.length > 0) + new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end else new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) monitorKubernetesPodsNamespacesLength = 0 From c1688e06639fb23b1970742eec8f8c9e937c8b7d Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 21 Jan 2021 12:42:01 -0800 Subject: [PATCH 037/175] move telegraf test to after cadvisor env vars --- kubernetes/linux/main.sh | 60 
++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 148bbc0f7..ef6e711e0 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -202,36 +202,6 @@ fi #Parse the prometheus configmap to create a file with new custom settings. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb -#If config parsing was successful, a copy of the conf file with replaced custom settings file is created -if [ ! -e "/etc/config/kube.conf" ]; then - if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ] && [ -e "/opt/telegraf-test-prom-side-car.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test-prom-side-car.conf -test - if [ $? -eq 0 ]; then - mv "/opt/telegraf-test-prom-side-car.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" - fi - echo "****************End Telegraf Run in Test Mode**************************" - else - if [ -e "/opt/telegraf-test.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test.conf -test - if [ $? -eq 0 ]; then - mv "/opt/telegraf-test.conf" "/etc/opt/microsoft/docker-cimprov/telegraf.conf" - fi - echo "****************End Telegraf Run in Test Mode**************************" - fi - fi -else - if [ -e "/opt/telegraf-test-rs.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test-rs.conf -test - if [ $? -eq 0 ]; then - mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" - fi - echo "****************End Telegraf Run in Test Mode**************************" - fi -fi - #Setting default environment variables to be used in any case of failure in the above steps if [ ! 
-e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then @@ -539,6 +509,36 @@ if [ ! -e "/etc/config/kube.conf" ]; then fi echo "************end oneagent log routing checks************" +#If config parsing was successful, a copy of the conf file with replaced custom settings file is created +if [ ! -e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ] && [ -e "/opt/telegraf-test-prom-side-car.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test-prom-side-car.conf -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test-prom-side-car.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + else + if [ -e "/opt/telegraf-test.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test.conf -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test.conf" "/etc/opt/microsoft/docker-cimprov/telegraf.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + fi + fi +else + if [ -e "/opt/telegraf-test-rs.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test-rs.conf -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + fi +fi + #telegraf & fluentbit requirements if [ ! 
-e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then From 344b1917aeb277f5b1200b27e38deda445ea0b40 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Thu, 21 Jan 2021 16:39:50 -0800 Subject: [PATCH 038/175] add type check for label and field selectors --- build/linux/installer/scripts/tomlparser-prom-customconfig.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 04f638f63..eb6688c5b 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -221,6 +221,8 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Check for the right datattypes to enforce right setting values if checkForType(interval, String) && + checkForType(kubernetesLabelSelectors, String) && + checkForType(kubernetesFieldSelectors, String) && checkForTypeArray(fieldPass, String) && checkForTypeArray(fieldDrop, String) && (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? 
&& (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby From 9ca455efaf855e4d93565275bca8b8a86cdf863d Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Thu, 21 Jan 2021 16:54:03 -0800 Subject: [PATCH 039/175] use latest telegraf --- kubernetes/linux/setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index a28543668..db1439658 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -61,7 +61,7 @@ sudo apt-get install libcap2-bin -y #service telegraf stop #wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf -wget https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-2/telegraf +wget https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-3/telegraf chmod 777 /opt/telegraf From 73455927c08de2a9518ff9f7c98366fa85a8a706 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 21 Jan 2021 18:13:13 -0800 Subject: [PATCH 040/175] add OSM configmap --- kubernetes/container-azm-ms-osmconfig.yaml | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 kubernetes/container-azm-ms-osmconfig.yaml diff --git a/kubernetes/container-azm-ms-osmconfig.yaml b/kubernetes/container-azm-ms-osmconfig.yaml new file mode 100644 index 000000000..acb5ebd92 --- /dev/null +++ b/kubernetes/container-azm-ms-osmconfig.yaml @@ -0,0 +1,17 @@ +kind: ConfigMap +apiVersion: v1 +data: + schema-version: + #string.used by agent to parse OSM config. supported versions are {v1}. Configs with other schema versions will be rejected by the agent. 
+ v1 + config-version: + #string.used by OSM addon team to keep track of this config file's version in their source control/repository (max allowed 10 chars, other chars will be truncated) + ver1 + osm-metric-collection-configuration: |- + # OSM metric collection settings + [osm-metric-collection-configuration.settings] + # Namespaces to monitor + monitor_namespaces = ["namespace1", "namespace2"] + metadata: + name: container-azm-ms-osmconfig + namespace: kube-system From a6620832de216ea2047a73e0295db7c94a55f8bb Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 21 Jan 2021 18:32:21 -0800 Subject: [PATCH 041/175] changes for OSM configmap --- build/linux/installer/scripts/livenessprobe.sh | 16 ++++++++++++++++ kubernetes/linux/main.sh | 5 +++++ kubernetes/omsagent.yaml | 7 +++++++ 3 files changed, 28 insertions(+) diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index e3f9fb475..7d5caeaf0 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -38,3 +38,19 @@ else exit 1 fi fi + +# Perform the following check only for prometheus sidecar that does OSM scraping +if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + if [ ! 
-s "inotifyoutput-osm.txt" ] + then + # inotifyoutput-osm file is empty and the grep commands for omsagent, td-agent-bit and inotifyoutput file check succeeded + exit 0 + else + if [ -s "inotifyoutput-osm.txt" ] + then + # inotifyoutput-osm file has data(config map was applied) + echo "inotifyoutput-osm.txt has been updated - config changed" > /dev/termination-log + exit 1 + fi + fi +fi diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index ef6e711e0..fd4752cf6 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -32,6 +32,11 @@ sudo setfacl -m user:omsagent:rwx /var/opt/microsoft/docker-cimprov/log #Run inotify as a daemon to track changes to the mounted configmap. inotifywait /etc/config/settings --daemon --recursive --outfile "/opt/inotifyoutput.txt" --event create,delete --format '%e : %T' --timefmt '+%s' +#Run inotify as a daemon to track changes to the mounted configmap for OSM settings. +if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + inotifywait /etc/config/osm-settings --daemon --recursive --outfile "/opt/inotifyoutput-osm.txt" --event create,delete --format '%e : %T' --timefmt '+%s' +fi + #resourceid override for loganalytics data. 
if [ -z $AKS_RESOURCE_ID ]; then echo "not setting customResourceId" diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 9c7c9d866..710201e5c 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -507,6 +507,9 @@ spec: - mountPath: /etc/config/settings/adx name: omsagent-adx-secret readOnly: true + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config + readOnly: true livenessProbe: exec: command: @@ -574,6 +577,10 @@ spec: secret: secretName: omsagent-adx-secret optional: true + - name: osm-settings-vol-config + configMap: + name: container-azm-ms-osmconfig + optional: true --- apiVersion: apps/v1 kind: Deployment From 83fe6836c2f32f6e8a1129b65892875e0eff85a7 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 22 Jan 2021 18:25:31 -0800 Subject: [PATCH 042/175] fixing bugs --- .../linux/installer/scripts/livenessprobe.sh | 34 ++++++++++--------- kubernetes/container-azm-ms-osmconfig.yaml | 2 +- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 7d5caeaf0..561390435 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -26,31 +26,33 @@ then exit 1 fi -if [ ! -s "inotifyoutput.txt" ] +# if [ ! 
-s "inotifyoutput.txt" ] +# then +# # inotifyoutput file is empty and the grep commands for omsagent and td-agent-bit succeeded +# exit 0 +# else +if [ -s "inotifyoutput.txt" ] then - # inotifyoutput file is empty and the grep commands for omsagent and td-agent-bit succeeded - exit 0 -else - if [ -s "inotifyoutput.txt" ] - then - # inotifyoutput file has data(config map was applied) - echo "inotifyoutput.txt has been updated - config changed" > /dev/termination-log - exit 1 - fi + # inotifyoutput file has data(config map was applied) + echo "inotifyoutput.txt has been updated - config changed" > /dev/termination-log + exit 1 fi +# fi # Perform the following check only for prometheus sidecar that does OSM scraping if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - if [ ! -s "inotifyoutput-osm.txt" ] - then - # inotifyoutput-osm file is empty and the grep commands for omsagent, td-agent-bit and inotifyoutput file check succeeded - exit 0 - else + #if [ ! 
-s "inotifyoutput-osm.txt" ] + # then + # # inotifyoutput-osm file is empty and the grep commands for omsagent, td-agent-bit and inotifyoutput file check succeeded + # exit 0 + # else if [ -s "inotifyoutput-osm.txt" ] then # inotifyoutput-osm file has data(config map was applied) echo "inotifyoutput-osm.txt has been updated - config changed" > /dev/termination-log exit 1 fi - fi + # fi fi + +exit 0 diff --git a/kubernetes/container-azm-ms-osmconfig.yaml b/kubernetes/container-azm-ms-osmconfig.yaml index acb5ebd92..1743959fe 100644 --- a/kubernetes/container-azm-ms-osmconfig.yaml +++ b/kubernetes/container-azm-ms-osmconfig.yaml @@ -12,6 +12,6 @@ data: [osm-metric-collection-configuration.settings] # Namespaces to monitor monitor_namespaces = ["namespace1", "namespace2"] - metadata: +metadata: name: container-azm-ms-osmconfig namespace: kube-system From b3a7eabe46b33ae37bda3e2aafc7819e66a2558f Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 22 Jan 2021 18:43:16 -0800 Subject: [PATCH 043/175] removing comments --- build/linux/installer/scripts/livenessprobe.sh | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 561390435..61b6310c3 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -26,33 +26,21 @@ then exit 1 fi -# if [ ! -s "inotifyoutput.txt" ] -# then -# # inotifyoutput file is empty and the grep commands for omsagent and td-agent-bit succeeded -# exit 0 -# else if [ -s "inotifyoutput.txt" ] then # inotifyoutput file has data(config map was applied) echo "inotifyoutput.txt has been updated - config changed" > /dev/termination-log exit 1 fi -# fi # Perform the following check only for prometheus sidecar that does OSM scraping if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - #if [ ! 
-s "inotifyoutput-osm.txt" ] - # then - # # inotifyoutput-osm file is empty and the grep commands for omsagent, td-agent-bit and inotifyoutput file check succeeded - # exit 0 - # else if [ -s "inotifyoutput-osm.txt" ] then # inotifyoutput-osm file has data(config map was applied) echo "inotifyoutput-osm.txt has been updated - config changed" > /dev/termination-log exit 1 fi - # fi fi exit 0 From 6c6ca2de597150f3a4417da40f3f25614d9beced Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 25 Jan 2021 18:39:39 -0800 Subject: [PATCH 044/175] configmap parser changes for OSM --- .../conf/telegraf-prom-side-car.conf | 1 + .../scripts/tomlparser-osm-config.rb | 136 ++++++++++++++++++ .../scripts/tomlparser-prom-customconfig.rb | 3 +- kubernetes/linux/main.sh | 15 ++ 4 files changed, 154 insertions(+), 1 deletion(-) create mode 100644 build/linux/installer/scripts/tomlparser-osm-config.rb diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index c6be87066..b86713464 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -772,6 +772,7 @@ $AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER +$AZMON_SIDECAR_OSM_PROM_PLUGINS # ##npm diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb new file mode 100644 index 000000000..ddc5a7ca2 --- /dev/null +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -0,0 +1,136 @@ +#!/usr/local/bin/ruby + +#this should be require relative in Linux and require in windows, since it is a gem install on windows +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 + require "tomlrb" +else + require_relative "tomlrb" +end + +require_relative "ConfigParseErrorLogger" + +@configMapMountPath = "/etc/config/osm-settings" +@configSchemaVersion = "" +@tgfConfigFileSidecar = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + +#Configurations to be used for the auto-generated input prometheus plugins for namespace filtering +@metricVersion = 2 +@monitorKubernetesPodsVersion = 2 +@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" +@urlTag = "scrapeUrl" +@bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" +@responseTimeout = "15s" +@tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +@insecureSkipVerify = true + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@configMapMountPath)) + puts "config::configmap container-azm-ms-osmconfig for osm metrics found, parsing values" + parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted config map for osm metrics" + return parsedConfig + else + puts "config::configmap container-azm-ms-osmconfig for osm metrics not mounted, using defaults" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for osm metrics: #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +def checkForTypeArray(arrayValue, arrayType) + if (arrayValue.nil? || (arrayValue.kind_of?(Array) && ((arrayValue.length == 0) || (arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType))))) + return true + else + return false + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + begin + if !parsedConfig.nil? 
&& + !parsedConfig[:osm_metric_collection_configuration].nil? && + !parsedConfig[:osm_metric_collection_configuration][:settings].nil? + osmPromMetricNamespaces = parsedConfig[:osm_metric_collection_configuration][:settings][:monitor_namespaces] + puts "config::osm::got:osm_metric_collection_configuration.settings.monitor_namespaces='#{osmPromMetricNamespaces}'" + + # Check to see if osm_metric_collection_configuration.settings has a valid setting for monitor_namespaces to enable scraping for specific namespaces + # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - + # - to use defaults in case of nil settings + if !osmPromMetricNamespaces.nil? && checkForTypeArray(osmPromMetricNamespaces, String) + # Adding a check to see if an empty array is passed for kubernetes namespaces + if (osmPromMetricNamespaces.length > 0) + @osmMetricNamespaces = osmPromMetricNamespaces + end + end + end + rescue => errorStr + puts "config::osm::error:Exception while reading config settings for osm configuration settings - #{errorStr}, using defaults" + @osmMetricNamespaces = [] + end +end + +@osmConfigSchemaVersion = ENV["AZMON_OSM_CFG_SCHEMA_VERSION"] +puts "****************Start OSM Config Processing********************" +if !@osmConfigSchemaVersion.nil? && !@osmConfigSchemaVersion.empty? && @osmConfigSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? 
+ populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@configMapMountPath)) + ConfigParseErrorLogger.logError("config::osm::unsupported/missing config schema version - '#{@osmConfigSchemaVersion}' , using defaults, please use supported schema version") + end + @osmMetricNamespaces = [] +end + +#replace place holders in configuration file +tgfConfig = File.read(tgfConfigFileSidecar) #read returns only after closing the file + +if @osmMetricNamespaces.length > 0 + osmPluginConfigsWithNamespaces = "" + @osmMetricNamespaces.each do |namespace| + if !namespace.nil? + #Stripping namespaces to remove leading and trailing whitespaces + namespace.strip! + if namespace.length > 0 + osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] +monitor_kubernetes_pods = true +monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} +monitor_kubernetes_pods_namespace = \"#{namespace}\" +fieldpass = #{@fieldPassSetting} +metric_version = #{@metricVersion} +url_tag = \"#{@urlTag}\" +bearer_token = \"#{@bearerToken}\" +response_timeout = \"#{@responseTimeout}\" +tls_ca = \"#{@tlsCa}\" +insecure_skip_verify = #{@insecureSkipVerify}\n" + end + end + end + tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", @osmPluginConfigsWithNamespaces) +else + puts "Using defaults for OSM configuration since there was an error in input or no namespaces were set" + tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") +end + +File.open(tgfConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope +puts "config::osm::Successfully substituted the OSM placeholders in #{tgfConfigFile} file in sidecar container" + +# Write the telemetry to file, so that they can be set as environment variables +telemetryFile = File.open("integration_osm_config_env_var", "w") + +if !telemetryFile.nil? 
+ telemetryFile.write("export TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT=#{@osmMetricNamespaces.length}\n") + # Close file after writing all environment variables + telemetryFile.close +else + puts "config::npm::Exception while opening file for writing OSM telemetry environment variables" + puts "****************End OSM Config Processing********************" +end diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index eb6688c5b..c95b51b88 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -28,6 +28,7 @@ #Configurations to be used for the auto-generated input prometheus plugins for namespace filtering @metricVersion = 2 +@monitorKubernetesPodsVersion = 2 @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" @responseTimeout = "15s" @@ -99,7 +100,7 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu pluginConfigsWithNamespaces += "\n[[inputs.prometheus]] interval = \"#{interval}\" monitor_kubernetes_pods = true - monitor_kubernetes_pods_version = 2 + monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} monitor_kubernetes_pods_namespace = \"#{namespace}\" kubernetes_label_selector = \"#{kubernetesLabelSelectors}\" kubernetes_field_selector = \"#{kubernetesFieldSelectors}\" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index fd4752cf6..4cb6f2146 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -77,6 +77,21 @@ if [ -e "/etc/config/settings/config-version" ] && [ -s "/etc/config/settings/ echo "AZMON_AGENT_CFG_FILE_VERSION:$AZMON_AGENT_CFG_FILE_VERSION" fi +#set OSM config schema version +if [ -e "/etc/config/osm-settings/schema-version" ] && [ -s "/etc/config/osm-settings/schema-version" ]; then + #trim + osm_config_schema_version="$(cat 
/etc/config/osm-settings/schema-version | xargs)" + #remove all spaces + osm_config_schema_version="${osm_config_schema_version//[[:space:]]/}" + #take first 10 characters + osm_config_schema_version="$(echo $osm_config_schema_version| cut -c1-10)" + + export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version + echo "export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version" >> ~/.bashrc + source ~/.bashrc + echo "AZMON_OSM_CFG_SCHEMA_VERSION:$AZMON_OSM_CFG_SCHEMA_VERSION" +fi + export PROXY_ENDPOINT="" # Check for internet connectivity or workspace deletion From a517d57532c5c723b2ac89911e2a088b670a9e4d Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 25 Jan 2021 18:51:32 -0800 Subject: [PATCH 045/175] more changes for configmap --- build/linux/installer/datafiles/base_container.data | 1 + .../linux/installer/scripts/tomlparser-osm-config.rb | 4 ++-- kubernetes/linux/main.sh | 12 ++++++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index e89b8537d..cae9ab21d 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -131,6 +131,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/td-agent-bit-conf-customizer.rb; build/common/installer/scripts/td-agent-bit-conf-customizer.rb; 755; root; root /opt/ConfigParseErrorLogger.rb; build/common/installer/scripts/ConfigParseErrorLogger.rb; 755; root; root /opt/tomlparser-npm-config.rb; build/linux/installer/scripts/tomlparser-npm-config.rb; 755; root; root +/opt/tomlparser-osm-config.rb; build/linux/installer/scripts/tomlparser-osm-config.rb; 755; root; root /opt/microsoft/omsagent/plugin/filter_cadvisor_health_container.rb; source/plugins/ruby/filter_cadvisor_health_container.rb; 644; root; root diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb 
b/build/linux/installer/scripts/tomlparser-osm-config.rb index ddc5a7ca2..536795e04 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -13,6 +13,7 @@ @configMapMountPath = "/etc/config/osm-settings" @configSchemaVersion = "" @tgfConfigFileSidecar = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" +@osmMetricNamespaces = [] #Configurations to be used for the auto-generated input prometheus plugins for namespace filtering @metricVersion = 2 @@ -87,7 +88,6 @@ def populateSettingValuesFromConfigMap(parsedConfig) if (File.file?(@configMapMountPath)) ConfigParseErrorLogger.logError("config::osm::unsupported/missing config schema version - '#{@osmConfigSchemaVersion}' , using defaults, please use supported schema version") end - @osmMetricNamespaces = [] end #replace place holders in configuration file @@ -116,7 +116,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) end tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", @osmPluginConfigsWithNamespaces) else - puts "Using defaults for OSM configuration since there was an error in input or no namespaces were set" + puts "Using defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") end diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 4cb6f2146..34dcab4f8 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -266,6 +266,18 @@ cat config_metric_collection_env_var | while read line; do done source config_metric_collection_env_var +#Parse the OSM configmap to set the right environment variables for metric collection settings +if [ ! 
-e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + + cat integration_osm_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source integration_osm_config_env_var + fi +fi + #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" #Defaults to use port 10255 From 3fae90d179c80a7025e4a0d308a01d3f64bacdad Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 25 Jan 2021 19:35:20 -0800 Subject: [PATCH 046/175] fixing bug --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 536795e04..77a678d1f 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -91,7 +91,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) end #replace place holders in configuration file -tgfConfig = File.read(tgfConfigFileSidecar) #read returns only after closing the file +tgfConfig = File.read(@tgfConfigFileSidecar) #read returns only after closing the file if @osmMetricNamespaces.length > 0 osmPluginConfigsWithNamespaces = "" From 8454561f786d121307a4c92367ff2477c267ec19 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 11:29:01 -0800 Subject: [PATCH 047/175] bug fix --- build/linux/installer/scripts/tomlparser-osm-config.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 77a678d1f..d1f665b99 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ 
b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -120,8 +120,8 @@ def populateSettingValuesFromConfigMap(parsedConfig) tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") end -File.open(tgfConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope -puts "config::osm::Successfully substituted the OSM placeholders in #{tgfConfigFile} file in sidecar container" +File.open(@tgfConfigFileSidecar, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope +puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfConfigFileSidecar} file in sidecar container" # Write the telemetry to file, so that they can be set as environment variables telemetryFile = File.open("integration_osm_config_env_var", "w") From c588ca6e85d6a330a12c721668dc70dc3a291942 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 14:58:56 -0800 Subject: [PATCH 048/175] fixing more bugs --- build/linux/installer/scripts/tomlparser-osm-config.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index d1f665b99..35b970ff8 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -10,7 +10,7 @@ require_relative "ConfigParseErrorLogger" -@configMapMountPath = "/etc/config/osm-settings" +@configMapMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" @configSchemaVersion = "" @tgfConfigFileSidecar = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" @osmMetricNamespaces = [] @@ -132,5 +132,5 @@ def populateSettingValuesFromConfigMap(parsedConfig) telemetryFile.close else puts "config::npm::Exception while opening file for writing OSM telemetry environment variables" - puts "****************End OSM Config 
Processing********************" end +puts "****************End OSM Config Processing********************" From 3dc105500cc57b5d2ee80d9de5c2e777d4372bc4 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 15:19:43 -0800 Subject: [PATCH 049/175] no osm --- kubernetes/linux/main.sh | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 34dcab4f8..22137d4f2 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -267,16 +267,16 @@ done source config_metric_collection_env_var #Parse the OSM configmap to set the right environment variables for metric collection settings -if [ ! -e "/etc/config/kube.conf" ]; then - if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb - - cat integration_osm_config_env_var | while read line; do - echo $line >> ~/.bashrc - done - source integration_osm_config_env_var - fi -fi +# if [ ! 
-e "/etc/config/kube.conf" ]; then +# if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then +# /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + +# cat integration_osm_config_env_var | while read line; do +# echo $line >> ~/.bashrc +# done +# source integration_osm_config_env_var +# fi +# fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" From 1dbfe4ef8f7b6b1a3f319c3aaa087f1f55564f98 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 16:40:54 -0800 Subject: [PATCH 050/175] bug fix --- build/linux/installer/conf/telegraf-prom-side-car.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index b86713464..4dfffffc3 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -772,7 +772,7 @@ $AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER -$AZMON_SIDECAR_OSM_PROM_PLUGINS +#$AZMON_SIDECAR_OSM_PROM_PLUGINS # ##npm From cb477230eefede72f31bb38134d7718d859650eb Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 17:23:23 -0800 Subject: [PATCH 051/175] fixing bugs --- .../conf/telegraf-prom-side-car.conf | 2 +- .../scripts/tomlparser-osm-config.rb | 11 +++++++++- .../linux/defaultpromenvvariables-sidecar | 1 + kubernetes/linux/main.sh | 20 +++++++++---------- 4 files changed, 22 insertions(+), 12 deletions(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 4dfffffc3..b86713464 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -772,7 +772,7 @@ $AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER 
-#$AZMON_SIDECAR_OSM_PROM_PLUGINS +$AZMON_SIDECAR_OSM_PROM_PLUGINS # ##npm diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 35b970ff8..012a22826 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -13,6 +13,7 @@ @configMapMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" @configSchemaVersion = "" @tgfConfigFileSidecar = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" +@tgfTestConfigFile = "/opt/telegraf-test.conf" @osmMetricNamespaces = [] #Configurations to be used for the auto-generated input prometheus plugins for namespace filtering @@ -90,8 +91,16 @@ def populateSettingValuesFromConfigMap(parsedConfig) end end +# Check to see if the prometheus custom config parser has created a test config file so that we can replace the settings in the test file and run it, If not create +# a test config file by copying contents of the actual sidecar telegraf config file. 
+if (!File.exist?(@tgfTestConfigFile)) + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + puts "test telegraf sidecar config file #{@tgfTestConfigFile} does not exist, creating new one" + FileUtils.cp(@tgfConfigFileSidecar, @tgfTestConfigFile) +end + #replace place holders in configuration file -tgfConfig = File.read(@tgfConfigFileSidecar) #read returns only after closing the file +tgfConfig = File.read(@tgfTestConfigFile) #read returns only after closing the file if @osmMetricNamespaces.length > 0 osmPluginConfigsWithNamespaces = "" diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar index 7a2022e21..6ceb28516 100644 --- a/kubernetes/linux/defaultpromenvvariables-sidecar +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -3,5 +3,6 @@ export AZMON_SIDECAR_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" export AZMON_SIDECAR_PROM_FIELDPASS="[]" export AZMON_SIDECAR_PROM_FIELDDROP="[]" export AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export AZMON_SIDECAR_OSM_PROM_PLUGINS="" export AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = \"\"" export AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = \"\"" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 22137d4f2..34dcab4f8 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -267,16 +267,16 @@ done source config_metric_collection_env_var #Parse the OSM configmap to set the right environment variables for metric collection settings -# if [ ! -e "/etc/config/kube.conf" ]; then -# if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then -# /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb - -# cat integration_osm_config_env_var | while read line; do -# echo $line >> ~/.bashrc -# done -# source integration_osm_config_env_var -# fi -# fi +if [ ! 
-e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + + cat integration_osm_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source integration_osm_config_env_var + fi +fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" From 6fdc78e112ad920c1001137aa2025dcd39babb6a Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 17:39:56 -0800 Subject: [PATCH 052/175] bug fix --- build/linux/installer/scripts/tomlparser-osm-config.rb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 012a22826..5a74ebafd 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -8,12 +8,13 @@ require_relative "tomlrb" end +require "fileutils" require_relative "ConfigParseErrorLogger" @configMapMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" @configSchemaVersion = "" @tgfConfigFileSidecar = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" -@tgfTestConfigFile = "/opt/telegraf-test.conf" +@tgfTestConfigFile = "/opt/telegraf-test-prom-side-car.conf" @osmMetricNamespaces = [] #Configurations to be used for the auto-generated input prometheus plugins for namespace filtering From 239c49bc463194d36ba9c938bcf5e77e63a61b62 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 17:52:26 -0800 Subject: [PATCH 053/175] bug fix --- build/linux/installer/scripts/tomlparser-osm-config.rb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb 
index 5a74ebafd..a86e26c67 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -130,8 +130,8 @@ def populateSettingValuesFromConfigMap(parsedConfig) tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") end -File.open(@tgfConfigFileSidecar, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope -puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfConfigFileSidecar} file in sidecar container" +File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope +puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfTestConfigFile} file in sidecar container" # Write the telemetry to file, so that they can be set as environment variables telemetryFile = File.open("integration_osm_config_env_var", "w") From 28f9044c9279f977d5ce021e9ea106d480c86c96 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 18:22:30 -0800 Subject: [PATCH 054/175] bug fix --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- kubernetes/container-azm-ms-osmconfig.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index a86e26c67..60016e091 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -124,7 +124,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) end end end - tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", @osmPluginConfigsWithNamespaces) + tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) else puts "Using defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" tgfConfig = 
tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") diff --git a/kubernetes/container-azm-ms-osmconfig.yaml b/kubernetes/container-azm-ms-osmconfig.yaml index 1743959fe..55ef45f07 100644 --- a/kubernetes/container-azm-ms-osmconfig.yaml +++ b/kubernetes/container-azm-ms-osmconfig.yaml @@ -9,7 +9,7 @@ data: ver1 osm-metric-collection-configuration: |- # OSM metric collection settings - [osm-metric-collection-configuration.settings] + [osm_metric_collection_configuration.settings] # Namespaces to monitor monitor_namespaces = ["namespace1", "namespace2"] metadata: From bd98690282c06855ed3736155edf4b160aec8ff1 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 18:41:47 -0800 Subject: [PATCH 055/175] fixing spaces --- .../scripts/tomlparser-osm-config.rb | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 60016e091..1a9e93d29 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -111,16 +111,16 @@ def populateSettingValuesFromConfigMap(parsedConfig) namespace.strip! 
if namespace.length > 0 osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] -monitor_kubernetes_pods = true -monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} -monitor_kubernetes_pods_namespace = \"#{namespace}\" -fieldpass = #{@fieldPassSetting} -metric_version = #{@metricVersion} -url_tag = \"#{@urlTag}\" -bearer_token = \"#{@bearerToken}\" -response_timeout = \"#{@responseTimeout}\" -tls_ca = \"#{@tlsCa}\" -insecure_skip_verify = #{@insecureSkipVerify}\n" + monitor_kubernetes_pods = true + monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} + monitor_kubernetes_pods_namespace = \"#{namespace}\" + fieldpass = #{@fieldPassSetting} + metric_version = #{@metricVersion} + url_tag = \"#{@urlTag}\" + bearer_token = \"#{@bearerToken}\" + response_timeout = \"#{@responseTimeout}\" + tls_ca = \"#{@tlsCa}\" + insecure_skip_verify = #{@insecureSkipVerify}\n" end end end From 3f4fc222f6bbc04268b2fa6120f86fdcb4dfad71 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 18:45:08 -0800 Subject: [PATCH 056/175] test changes --- build/linux/installer/scripts/tomlparser-osm-config.rb | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 1a9e93d29..6757aa7cc 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -20,7 +20,9 @@ #Configurations to be used for the auto-generated input prometheus plugins for namespace filtering @metricVersion = 2 @monitorKubernetesPodsVersion = 2 -@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" +#@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" +@fieldPassSetting = "[\"go_goroutines\", \" go_memstats_alloc_bytes\"]" + @urlTag = "scrapeUrl" @bearerToken = 
"/var/run/secrets/kubernetes.io/serviceaccount/token" @responseTimeout = "15s" From 9c657ad0d83252af32e72ad48b4f703874d30117 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 26 Jan 2021 18:58:36 -0800 Subject: [PATCH 057/175] fix --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 6757aa7cc..64a0d57c7 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -21,7 +21,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"go_goroutines\", \" go_memstats_alloc_bytes\"]" +@fieldPassSetting = "[\"go_goroutines\", \"go_memstats_alloc_bytes\"]" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" From 277f036b456c6172078c13c0b52ef8dcc34ac88d Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 27 Jan 2021 14:45:11 -0800 Subject: [PATCH 058/175] some tests --- build/linux/installer/conf/telegraf-prom-side-car.conf | 1 + build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index b86713464..d35dd4b6d 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -772,6 +772,7 @@ $AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER +## OSM Prometheus configuration $AZMON_SIDECAR_OSM_PROM_PLUGINS diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 64a0d57c7..c9dc21824 100644 --- 
a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -21,7 +21,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"go_goroutines\", \"go_memstats_alloc_bytes\"]" +@fieldPassSetting = "[\"go_goroutines\"]" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" From c1abb672eed1469634fdf81afee0884f07d130ef Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 27 Jan 2021 18:24:08 -0800 Subject: [PATCH 059/175] changes --- .../scripts/tomlparser-prom-customconfig.rb | 13 ++++++++++++- kubernetes/linux/main.sh | 9 +++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index c95b51b88..163dcca13 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -247,16 +247,18 @@ def populateSettingValuesFromConfigMap(parsedConfig) fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDPASS", fieldPassSetting) fieldDropSetting = (fieldDrop.length > 0) ? 
("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDDROP", fieldDropSetting) + #new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDDROP", fieldDropSetting) # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - # - to use defaults in case of nil settings + monitorKubernetesPodsNSConfig = [] if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? && checkForTypeArray(monitorKubernetesPodsNamespaces, String) # Adding a check to see if an empty array is passed for kubernetes namespaces if (monitorKubernetesPodsNamespaces.length > 0) new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + monitorKubernetesPodsNSConfig = monitorKubernetesPodsNamespaces else new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) monitorKubernetesPodsNamespacesLength = 0 @@ -266,6 +268,15 @@ def populateSettingValuesFromConfigMap(parsedConfig) monitorKubernetesPodsNamespacesLength = 0 end + # Add fielddrop as environment variable so that OSM parser can append to it if needed + file = File.open("prom_config_shared_settings_env_var", "w") + if !file.nil? 
+ file.write("export AZMON_SIDECAR_PROM_FIELDDROP=#{fieldDropSetting}\n") + # Close file after writing all environment variables + file.close + puts "config::Successfully created prom_config_shared_settings_env_var file for prometheus sidecar" + end + File.open(file_name, "w") { |file| file.puts new_contents } puts "config::Successfully substituted the placeholders in telegraf conf file for prometheus side car" #Set environment variables for telemetry diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 34dcab4f8..22cef024f 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -250,6 +250,15 @@ if [ -e "telemetry_prom_config_env_var" ]; then source telemetry_prom_config_env_var fi +#Sourcing prometheus side car config settings if it exists +if [ -e "prom_config_shared_settings_env_var" ]; then + cat prom_config_shared_settings_env_var | while read line; do + echo $line >> ~/.bashrc + done + source prom_config_shared_settings_env_var +fi + + #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. 
/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb From 5aeec63dbc3b5abd67447068a07dbd9e9493dea4 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 27 Jan 2021 18:40:36 -0800 Subject: [PATCH 060/175] conflict changes --- .../scripts/tomlparser-osm-config.rb | 10 +++++ .../scripts/tomlparser-prom-customconfig.rb | 9 ----- kubernetes/linux/main.sh | 39 +++++++++++-------- 3 files changed, 32 insertions(+), 26 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index c9dc21824..10d84ddd0 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -135,6 +135,16 @@ def populateSettingValuesFromConfigMap(parsedConfig) File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfTestConfigFile} file in sidecar container" +# Set OSM namespaces as environment variable so that prometheus custom config parser can read it and add necessary fielddrops to avoid data duplication +# of OSM metrics +promSettingsSharedfile = File.open("prom_config_shared_settings_env_var", "w") +if !promSettingsSharedfile.nil? 
+ promSettingsSharedfile.write("export AZMON_OSM_METRIC_NAMESPACES=#{@osmMetricNamespaces}\n") + # Close file after writing all environment variables + promSettingsSharedfile.close + puts "config::Successfully created prom_config_shared_settings_env_var file for prometheus sidecar" +end + # Write the telemetry to file, so that they can be set as environment variables telemetryFile = File.open("integration_osm_config_env_var", "w") diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 163dcca13..204235ebb 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -268,15 +268,6 @@ def populateSettingValuesFromConfigMap(parsedConfig) monitorKubernetesPodsNamespacesLength = 0 end - # Add fielddrop as environment variable so that OSM parser can append to it if needed - file = File.open("prom_config_shared_settings_env_var", "w") - if !file.nil? - file.write("export AZMON_SIDECAR_PROM_FIELDDROP=#{fieldDropSetting}\n") - # Close file after writing all environment variables - file.close - puts "config::Successfully created prom_config_shared_settings_env_var file for prometheus sidecar" - end - File.open(file_name, "w") { |file| file.puts new_contents } puts "config::Successfully substituted the placeholders in telegraf conf file for prometheus side car" #Set environment variables for telemetry diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 22cef024f..0939605b7 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -219,6 +219,27 @@ if [ ! -e "/etc/config/kube.conf" ]; then /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb fi +#Parse the OSM configmap to set the right environment variables for metric collection settings +#This needs to be before the prometheus custom config map parser since we have namespace duplication logic in place. 
+if [ ! -e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + + cat integration_osm_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source integration_osm_config_env_var + + #Sourcing prometheus side car config settings if it exists + if [ -e "prom_config_shared_settings_env_var" ]; then + cat prom_config_shared_settings_env_var | while read line; do + echo $line >> ~/.bashrc + done + source prom_config_shared_settings_env_var + fi + fi +fi + #Parse the prometheus configmap to create a file with new custom settings. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb @@ -250,13 +271,7 @@ if [ -e "telemetry_prom_config_env_var" ]; then source telemetry_prom_config_env_var fi -#Sourcing prometheus side car config settings if it exists -if [ -e "prom_config_shared_settings_env_var" ]; then - cat prom_config_shared_settings_env_var | while read line; do - echo $line >> ~/.bashrc - done - source prom_config_shared_settings_env_var -fi + #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. @@ -275,17 +290,7 @@ cat config_metric_collection_env_var | while read line; do done source config_metric_collection_env_var -#Parse the OSM configmap to set the right environment variables for metric collection settings -if [ ! 
-e "/etc/config/kube.conf" ]; then - if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb - cat integration_osm_config_env_var | while read line; do - echo $line >> ~/.bashrc - done - source integration_osm_config_env_var - fi -fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" From f44266a588fa7047efa6e3a505971b32325a8291 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 29 Jan 2021 11:17:08 -0800 Subject: [PATCH 061/175] update interval for osm sidecar config --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 10d84ddd0..2de25e18e 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -23,6 +23,7 @@ #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" @fieldPassSetting = "[\"go_goroutines\"]" +@scrapeInterval = "1m" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" @responseTimeout = "15s" @@ -113,6 +114,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) namespace.strip! 
if namespace.length > 0 osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] + interval = \"#{@scrapeInterval}\" monitor_kubernetes_pods = true monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} monitor_kubernetes_pods_namespace = \"#{namespace}\" From e27c438b8f82e6c54b355186b7f8c6782333db49 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 29 Jan 2021 12:04:01 -0800 Subject: [PATCH 062/175] get namespaces --- kubernetes/linux/main.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 0939605b7..a0808dbc3 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -220,7 +220,7 @@ if [ ! -e "/etc/config/kube.conf" ]; then fi #Parse the OSM configmap to set the right environment variables for metric collection settings -#This needs to be before the prometheus custom config map parser since we have namespace duplication logic in place. +#This needs to be done before the prometheus custom config map parsing since we have namespace duplication logic in place. if [ ! -e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb @@ -272,8 +272,6 @@ if [ -e "telemetry_prom_config_env_var" ]; then fi - - #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb @@ -322,6 +320,7 @@ else echo "Making curl request to cadvisor endpoint with port 10255 to get the configured container runtime on kubelet" podWithValidContainerId=$(curl -s http://$NODE_IP:10255/pods | jq -R 'fromjson? | [ .items[] | select( any(.status.phase; contains("Running")) ) ] | .[0]') fi +#podWithValidContainerId=$(curl -s -k -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://$NODE_IP:10250/pods | jq -R 'fromjson? 
| [ .items[] | .metadata.namespace ] ' ) if [ ! -z "$podWithValidContainerId" ]; then containerRuntime=$(echo $podWithValidContainerId | jq -r '.status.containerStatuses[0].containerID' | cut -d ':' -f 1) From faf2ef0f8cf4fe6b2d8211bf9aa34cdf575753c5 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 29 Jan 2021 12:55:11 -0800 Subject: [PATCH 063/175] adding prefix for OSM tags --- build/linux/installer/scripts/tomlparser-osm-config.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 2de25e18e..2b8303b94 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -114,6 +114,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) namespace.strip! if namespace.length > 0 osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] + name_prefix=\"container.azm.ms.osm\" interval = \"#{@scrapeInterval}\" monitor_kubernetes_pods = true monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} From 4554b52c7883417274444b8db946cc549757b079 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 1 Feb 2021 18:04:54 -0800 Subject: [PATCH 064/175] appmap changes --- source/plugins/go/src/oms.go | 89 ++++++++++++++++++++++++++++-------- 1 file changed, 71 insertions(+), 18 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 5a678781c..fb92db0a6 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -3,6 +3,7 @@ package main import ( "bytes" "context" + "crypto/rand" "encoding/json" "fmt" "io" @@ -194,16 +195,16 @@ type DataItem struct { } type DataItemADX struct { - TimeGenerated string `json:"TimeGenerated"` - Computer string `json:"Computer"` - ContainerID string `json:"ContainerID"` - ContainerName string `json:"ContainerName"` - PodName string `json:"PodName"` - PodNamespace 
string `json:"PodNamespace"` - LogMessage string `json:"LogMessage"` - LogSource string `json:"LogSource"` + TimeGenerated string `json:"TimeGenerated"` + Computer string `json:"Computer"` + ContainerID string `json:"ContainerID"` + ContainerName string `json:"ContainerName"` + PodName string `json:"PodName"` + PodNamespace string `json:"PodNamespace"` + LogMessage string `json:"LogMessage"` + LogSource string `json:"LogSource"` //PodLabels string `json:"PodLabels"` - AzureResourceId string `json:"AzureResourceId"` + AzureResourceId string `json:"AzureResourceId"` } // telegraf metric DataItem represents the object corresponding to the json that is sent by fluentbit tail plugin @@ -219,6 +220,27 @@ type laTelegrafMetric struct { Computer string `json:"Computer"` } +type appMapOsmRequestMetric struct { + CollectionTime string `json:"CollectionTime"` + OperationId string `json:"OperationId"` + ParentId string `json:"ParentId"` + AppRoleName string `json:"AppRoleName"` + DurationMs float64 `json:"DurationMs"` + Success bool `json:"Success"` + ItemCount int64 `json:"ItemCount"` +} + +type appMapOsmDependencyMetric struct { + CollectionTime string `json:"CollectionTime"` + OperationId string `json:"OperationId"` + Id string `json:"Id"` + Target string `json:"Target"` + AppRoleName string `json:"AppRoleName"` + DurationMs float64 `json:"DurationMs"` + Success bool `json:"Success"` + ItemCount int64 `json:"ItemCount"` +} + // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point type InsightsMetricsBlob struct { DataType string `json:"DataType"` @@ -326,6 +348,20 @@ func createLogger() *log.Logger { return logger } +// newUUID generates a random UUID according to RFC 4122 +func newUUID() (string, error) { + uuid := make([]byte, 16) + n, err := io.ReadFull(rand.Reader, uuid) + if n != len(uuid) || err != nil { + return "", err + } + // variant bits; see section 4.1.1 + uuid[8] = uuid[8]&^0xc0 | 0x80 + // version 4 
(pseudo-random); see section 4.1.3 + uuid[6] = uuid[6]&^0xf0 | 0x40 + return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil +} + func updateContainerImageNameMaps() { for ; true; <-ContainerImageNameRefreshTicker.C { Log("Updating ImageIDMap and NameIDMap") @@ -641,8 +677,13 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri var laMetrics []*laTelegrafMetric var tags map[interface{}]interface{} + // string appName + // string destinationAppName + // string id + // string operationId tags = m["tags"].(map[interface{}]interface{}) tagMap := make(map[string]string) + metricNamespace := fmt.Sprintf("%s", m["name"]) for k, v := range tags { key := fmt.Sprintf("%s", k) if key == "" { @@ -683,6 +724,18 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri //Log ("la metric:%v", laMetric) laMetrics = append(laMetrics, &laMetric) + + metricName := fmt.Sprintf("%s", k) + if (metricName == "envoy_cluster_upstream_rq_active") && (strings.HasPrefix(metricNamespace, "container.azm.ms.osm")) { + appName := tagMap["app"] + destinationAppName := tagMap["envoy_cluster_name"] + uuid, err := newUUID() + if err != nil { + Log("translateTelegrafMetrics::error while generating GUID: %v\n", err) + } + Log("translateTelegrafMetrics::%s\n", uuid) + + } } return laMetrics, nil } @@ -871,15 +924,15 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { stringMap["PodNamespace"] = k8sNamespace stringMap["ContainerName"] = containerName dataItemADX = DataItemADX{ - TimeGenerated: stringMap["LogEntryTimeStamp"], - Computer: stringMap["Computer"], - ContainerID: stringMap["Id"], - ContainerName: stringMap["ContainerName"], - PodName: stringMap["PodName"], - PodNamespace: stringMap["PodNamespace"], - LogMessage: stringMap["LogEntry"], - LogSource: stringMap["LogEntrySource"], - AzureResourceId: stringMap["AzureResourceId"], + TimeGenerated: 
stringMap["LogEntryTimeStamp"], + Computer: stringMap["Computer"], + ContainerID: stringMap["Id"], + ContainerName: stringMap["ContainerName"], + PodName: stringMap["PodName"], + PodNamespace: stringMap["PodNamespace"], + LogMessage: stringMap["LogEntry"], + LogSource: stringMap["LogEntrySource"], + AzureResourceId: stringMap["AzureResourceId"], } //ADX dataItemsADX = append(dataItemsADX, dataItemADX) From cd3b9d958bde6331bfef67cd3bc8d037d8c9bec7 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 1 Feb 2021 18:21:28 -0800 Subject: [PATCH 065/175] app map changes --- source/plugins/go/src/oms.go | 60 ++++++++++++++++++++++++++++++------ 1 file changed, 51 insertions(+), 9 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index fb92db0a6..c1a927159 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -673,9 +673,11 @@ func flushKubeMonAgentEventRecords() { } //Translates telegraf time series to one or more Azure loganalytics metric(s) -func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetric, error) { +func translateTelegrafMetrics(m map[interface{}]interface{}, appMapRequests map[interface{}]interface{}, appMapDependencies map[interface{}]interface{}) ([]*laTelegrafMetric, error) { var laMetrics []*laTelegrafMetric + var appMapOsmRequestMetrics []*appMapOsmRequestMetric + var appMapOsmDependencyMetrics []*appMapOsmDependencyMetric var tags map[interface{}]interface{} // string appName // string destinationAppName @@ -725,19 +727,59 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri //Log ("la metric:%v", laMetric) laMetrics = append(laMetrics, &laMetric) + // OSM metric population for AppMap metricName := fmt.Sprintf("%s", k) if (metricName == "envoy_cluster_upstream_rq_active") && (strings.HasPrefix(metricNamespace, "container.azm.ms.osm")) { - appName := tagMap["app"] - destinationAppName := tagMap["envoy_cluster_name"] - 
uuid, err := newUUID() - if err != nil { - Log("translateTelegrafMetrics::error while generating GUID: %v\n", err) - } - Log("translateTelegrafMetrics::%s\n", uuid) + if fv > 0 { + appName := tagMap["app"] + destinationAppName := tagMap["envoy_cluster_name"] + itemCount := 1 + success := true + durationMs := 1.0 + operationId, err := newUUID() + if err != nil { + Log("translateTelegrafMetrics::error while generating operationId GUID: %v\n", err) + } + Log("translateTelegrafMetrics::%s\n", operationId) + id, err := newUUID() + if err != nil { + Log("translateTelegrafMetrics::error while generating id GUID: %v\n", err) + } + Log("translateTelegrafMetrics::%s\n", id) + collectionTimeValue := m["timestamp"].(uint64) + osmRequestMetric := appMapOsmRequestMetric{ + OperationId: fmt.Sprintf("%s", operationId), + ParentId: fmt.Sprintf("%s", id), + AppRoleName: fmt.Sprintf("%s", destinationAppName), + DurationMs: durationMs, + Success: success, + ItemCount: itemCount, + CollectionTime: time.Unix(int64(collectionTimeValue), 0).Format(time.RFC3339), + // Computer: Computer, //this is the collection agent's computer name, not necessarily to which computer the metric applies to + } + + Log("osm request metric:%v", osmRequestMetric) + appMapOsmRequestMetrics = append(appMapOsmRequestMetrics, &osmRequestMetric) + + osmDependencyMetric := appMapOsmDependencyMetric{ + OperationId: fmt.Sprintf("%s", operationId), + Id: fmt.Sprintf("%s", id), + AppRoleName: fmt.Sprintf("%s", appName), + Target: fmt.Sprintf("%s", destinationAppName), + DurationMs: durationMs, + Success: success, + ItemCount: itemCount, + CollectionTime: time.Unix(int64(collectionTimeValue), 0).Format(time.RFC3339), + // Computer: Computer, //this is the collection agent's computer name, not necessarily to which computer the metric applies to + } + + Log("osm dependency metric:%v", osmDependencyMetric) + appMapOsmDependencyMetrics = append(appMapOsmDependencyMetrics, &osmDependencyMetric) + } } } - return laMetrics, 
nil + return laMetrics, appMapOsmRequestMetrics, appMapOsmDependencyMetrics, nil } // send metrics from Telegraf to LA. 1) Translate telegraf timeseries to LA metric(s) 2) Send it to LA as 'InsightsMetrics' fixed type From ef8c8028d5da9675915c75db41120d4f18e7270d Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 1 Feb 2021 18:51:06 -0800 Subject: [PATCH 066/175] appmap changes --- source/plugins/go/src/oms.go | 171 ++++++++++++++++++++++++++++++++++- 1 file changed, 169 insertions(+), 2 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index c1a927159..0271bb5c4 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -36,6 +36,12 @@ const ContainerLogDataType = "CONTAINER_LOG_BLOB" // DataType for Insights metric const InsightsMetricsDataType = "INSIGHTS_METRICS_BLOB" +// DataType for ApplicationInsights AppRequests +const AppRequestsDataType = "APPLICATIONINSIGHTS_APPREQUESTS" + +// DataType for ApplicationInsights AppDependencies +const AppDependenciesDataType = "APPLICATIONINSIGHTS_APPDEPENDENCIES" + // DataType for KubeMonAgentEvent const KubeMonAgentEventDataType = "KUBE_MON_AGENT_EVENTS_BLOB" @@ -248,6 +254,18 @@ type InsightsMetricsBlob struct { DataItems []laTelegrafMetric `json:"DataItems"` } +type AppMapOsmRequestBlob struct { + DataType string `json:"DataType"` + IPName string `json:"IPName"` + DataItems []appMapOsmRequestMetric `json:"DataItems"` +} + +type AppMapOsmDependencyBlob struct { + DataType string `json:"DataType"` + IPName string `json:"IPName"` + DataItems []appMapOsmDependencyMetric `json:"DataItems"` +} + // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point type ContainerLogBlob struct { DataType string `json:"DataType"` @@ -674,7 +692,6 @@ func flushKubeMonAgentEventRecords() { //Translates telegraf time series to one or more Azure loganalytics metric(s) func translateTelegrafMetrics(m 
map[interface{}]interface{}, appMapRequests map[interface{}]interface{}, appMapDependencies map[interface{}]interface{}) ([]*laTelegrafMetric, error) { - var laMetrics []*laTelegrafMetric var appMapOsmRequestMetrics []*appMapOsmRequestMetric var appMapOsmDependencyMetrics []*appMapOsmDependencyMetric @@ -785,6 +802,8 @@ func translateTelegrafMetrics(m map[interface{}]interface{}, appMapRequests map[ // send metrics from Telegraf to LA. 1) Translate telegraf timeseries to LA metric(s) 2) Send it to LA as 'InsightsMetrics' fixed type func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int { var laMetrics []*laTelegrafMetric + var appMapOsmRequestMetrics []*appMapOsmRequestMetric + var appMapOsmDependencyMetrics []*appMapOsmDependencyMetric if (telegrafRecords == nil) || !(len(telegrafRecords) > 0) { Log("PostTelegrafMetricsToLA::Error:no timeseries to derive") @@ -792,13 +811,15 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } for _, record := range telegrafRecords { - translatedMetrics, err := translateTelegrafMetrics(record) + translatedMetrics, osmRequestMetrics, osmDependencyMetrics, err := translateTelegrafMetrics(record) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when translating telegraf metric to log analytics metric %q", err) Log(message) //SendException(message) //This will be too noisy } laMetrics = append(laMetrics, translatedMetrics...) + appMapOsmRequestMetrics = append(appMapOsmRequestMetrics, osmRequestMetrics...) + appMapOsmDependencyMetrics = append(appMapOsmDependencyMetrics, osmDependencyMetrics...) 
} if (laMetrics == nil) || !(len(laMetrics) > 0) { @@ -809,6 +830,22 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int Log(message) } + if (appMapOsmRequestMetrics == nil) || !(len(appMapOsmRequestMetrics) > 0) { + Log("PostTelegrafMetricsToLA::Info:no OSM request metrics derived from timeseries data") + return output.FLB_OK + } else { + message := fmt.Sprintf("PostTelegrafMetricsToLA::Info:derived osm request %v metrics from %v timeseries", len(appMapOsmRequestMetrics), len(telegrafRecords)) + Log(message) + } + + if (appMapOsmDependencyMetrics == nil) || !(len(appMapOsmDependencyMetrics) > 0) { + Log("PostTelegrafMetricsToLA::Info:no OSM dependency metrics derived from timeseries data") + return output.FLB_OK + } else { + message := fmt.Sprintf("PostTelegrafMetricsToLA::Info:derived osm dependency %v metrics from %v timeseries", len(appMapOsmDependencyMetrics), len(telegrafRecords)) + Log(message) + } + var metrics []laTelegrafMetric var i int @@ -873,6 +910,136 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int UpdateNumTelegrafMetricsSentTelemetry(numMetrics, 0, 0) Log("PostTelegrafMetricsToLA::Info:Successfully flushed %v records in %v", numMetrics, elapsed) + // AppMap Requests + var requestMetrics []appMapOsmRequestMetric + var j int + + for j = 0; j < len(appMapOsmRequestMetrics); j++ { + requestMetrics = append(requestMetrics, *appMapOsmRequestMetrics[j]) + } + + appMapOsmRequestMetrics := AppMapOsmRequestBlob{ + DataType: AppRequestsDataType, + IPName: "LogManagement", + DataItems: requestMetrics} + + requestJsonBytes, err := json.Marshal(appMapOsmRequestMetrics) + + if err != nil { + message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling app requests json %q", err) + Log(message) + SendException(message) + return output.FLB_OK + } + + //Post metrics data to LA + appRequestReq, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(requestJsonBytes)) + + 
//req.URL.Query().Add("api-version","2016-04-01") + + //set headers + appRequestReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) + appRequestReq.Header.Set("User-Agent", userAgent) + appRequestReqID := uuid.New().String() + appRequestReq.Header.Set("X-Request-ID", appRequestReqID) + + //expensive to do string len for every request, so use a flag + if ResourceCentric == true { + appRequestReq.Header.Set("x-ms-AzureResourceId", ResourceID) + } + + start := time.Now() + appRequestResp, err := HTTPClient.Do(appRequestReq) + reqElapsed := time.Since(start) + + if err != nil { + message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending apprequest %v metrics. duration:%v err:%q \n", len(appMapOsmRequestMetrics), reqElapsed, err.Error()) + Log(message) + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) + return output.FLB_RETRY + } + + if appRequestResp == nil || appRequestResp.StatusCode != 200 { + if appRequestResp != nil { + Log("PostTelegrafMetricsToLA::Error:(retriable) app requests RequestID %s Response Status %v Status Code %v", appRequestReqID, appRequestResp.Status, appRequestResp.StatusCode) + } + if appRequestResp != nil && appRequestResp.StatusCode == 429 { + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 1) + } + return output.FLB_RETRY + } + + defer appRequestResp.Body.Close() + + appRequestNumMetrics := len(appMapOsmRequestMetrics) + UpdateNumTelegrafMetricsSentTelemetry(appRequestNumMetrics, 0, 0) + Log("PostTelegrafMetricsToLA::Info:AppRequests:Successfully flushed %v records in %v", appRequestNumMetrics, reqElapsed) + + // AppMap Dependencies + var dependencyMetrics []appMapOsmDependencyMetric + var myint int + + for myint = 0; myint < len(appMapOsmRequestMetrics); myint++ { + dependencyMetrics = append(requestMetrics, *appMapOsmRequestMetrics[myint]) + } + + appMapOsmDependencyMetrics := AppMapOsmDependencyBlob{ + DataType: AppDependenciesDataType, + IPName: "LogManagement", + DataItems: dependencyMetrics} + + 
dependencyJsonBytes, err := json.Marshal(appMapOsmDependencyMetrics) + + if err != nil { + message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling app dependencies json %q", err) + Log(message) + SendException(message) + return output.FLB_OK + } + + //Post metrics data to LA + appDependencyReq, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(dependencyJsonBytes)) + + //req.URL.Query().Add("api-version","2016-04-01") + + //set headers + appDependencyReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) + appDependencyReq.Header.Set("User-Agent", userAgent) + appDependencyReqID := uuid.New().String() + appDependencyReq.Header.Set("X-Request-ID", appDependencyReqID) + + //expensive to do string len for every request, so use a flag + if ResourceCentric == true { + appDependencyReq.Header.Set("x-ms-AzureResourceId", ResourceID) + } + + start := time.Now() + appDependencyResp, err := HTTPClient.Do(appDependencyReq) + depElapsed := time.Since(start) + + if err != nil { + message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending appdependency %v metrics. 
duration:%v err:%q \n", len(appMapOsmDependencyMetrics), elapsed, err.Error()) + Log(message) + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) + return output.FLB_RETRY + } + + if appDependencyResp == nil || appDependencyResp.StatusCode != 200 { + if appDependencyResp != nil { + Log("PostTelegrafMetricsToLA::Error:(retriable) app dependency RequestID %s Response Status %v Status Code %v", appDependencyReqID, appDependencyResp.Status, appDependencyResp.StatusCode) + } + if appDependencyResp != nil && appDependencyResp.StatusCode == 429 { + UpdateNumTelegrafMetricsSentTelemetry(0, 1, 1) + } + return output.FLB_RETRY + } + + defer appDependencyResp.Body.Close() + + appDependencyNumMetrics := len(appMapOsmDependencyMetrics) + UpdateNumTelegrafMetricsSentTelemetry(appDependencyNumMetrics, 0, 0) + Log("PostTelegrafMetricsToLA::Info:AppDependency:Successfully flushed %v records in %v", appDependencyNumMetrics, depElapsed) + return output.FLB_OK } From b8843c5a74e659b9b65c153ed0b0de22c88350dd Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 1 Feb 2021 18:54:40 -0800 Subject: [PATCH 067/175] changes --- source/plugins/go/src/oms.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 0271bb5c4..b6251f66e 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -750,9 +750,9 @@ func translateTelegrafMetrics(m map[interface{}]interface{}, appMapRequests map[ if fv > 0 { appName := tagMap["app"] destinationAppName := tagMap["envoy_cluster_name"] - itemCount := 1 + itemCount := int64(1) success := true - durationMs := 1.0 + durationMs := float64(1.0) operationId, err := newUUID() if err != nil { Log("translateTelegrafMetrics::error while generating operationId GUID: %v\n", err) From 1b3b8b6094962d8abee5644a7cf68b296914f8c8 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 1 Feb 2021 18:56:18 -0800 Subject: [PATCH 068/175] changes --- 
source/plugins/go/src/oms.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index b6251f66e..d697fe95d 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -691,7 +691,7 @@ func flushKubeMonAgentEventRecords() { } //Translates telegraf time series to one or more Azure loganalytics metric(s) -func translateTelegrafMetrics(m map[interface{}]interface{}, appMapRequests map[interface{}]interface{}, appMapDependencies map[interface{}]interface{}) ([]*laTelegrafMetric, error) { +func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetric, []*appMapOsmRequestMetric, []*appMapOsmDependencyMetric, error) { var laMetrics []*laTelegrafMetric var appMapOsmRequestMetrics []*appMapOsmRequestMetric var appMapOsmDependencyMetrics []*appMapOsmDependencyMetric @@ -721,7 +721,7 @@ func translateTelegrafMetrics(m map[interface{}]interface{}, appMapRequests map[ tagJson, err := json.Marshal(&tagMap) if err != nil { - return nil, err + return nil, nil, nil, err } for k, v := range fieldMap { From e31f31081af19541f513e421a4760e07b8e3e73a Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 1 Feb 2021 18:59:54 -0800 Subject: [PATCH 069/175] changes --- source/plugins/go/src/oms.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index d697fe95d..967db0ac7 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -918,12 +918,12 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int requestMetrics = append(requestMetrics, *appMapOsmRequestMetrics[j]) } - appMapOsmRequestMetrics := AppMapOsmRequestBlob{ + osmRequestMetrics := AppMapOsmRequestBlob{ DataType: AppRequestsDataType, IPName: "LogManagement", DataItems: requestMetrics} - requestJsonBytes, err := json.Marshal(appMapOsmRequestMetrics) + 
requestJsonBytes, err := json.Marshal(osmRequestMetrics) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling app requests json %q", err) @@ -953,7 +953,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int reqElapsed := time.Since(start) if err != nil { - message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending apprequest %v metrics. duration:%v err:%q \n", len(appMapOsmRequestMetrics), reqElapsed, err.Error()) + message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending apprequest %v metrics. duration:%v err:%q \n", len(osmRequestMetrics), reqElapsed, err.Error()) Log(message) UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) return output.FLB_RETRY @@ -979,16 +979,16 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int var dependencyMetrics []appMapOsmDependencyMetric var myint int - for myint = 0; myint < len(appMapOsmRequestMetrics); myint++ { - dependencyMetrics = append(requestMetrics, *appMapOsmRequestMetrics[myint]) + for myint = 0; myint < len(appMapOsmDependencyMetrics); myint++ { + dependencyMetrics = append(dependencyMetrics, *appMapOsmDependencyMetrics[myint]) } - appMapOsmDependencyMetrics := AppMapOsmDependencyBlob{ + osmDependencyMetrics := AppMapOsmDependencyBlob{ DataType: AppDependenciesDataType, IPName: "LogManagement", DataItems: dependencyMetrics} - dependencyJsonBytes, err := json.Marshal(appMapOsmDependencyMetrics) + dependencyJsonBytes, err := json.Marshal(osmDependencyMetrics) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling app dependencies json %q", err) From fe11037be6a80c99d2ecc19ca947a62d49a0df8d Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 1 Feb 2021 19:00:59 -0800 Subject: [PATCH 070/175] changes --- source/plugins/go/src/oms.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/plugins/go/src/oms.go 
b/source/plugins/go/src/oms.go index 967db0ac7..c92b69587 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -948,9 +948,9 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int appRequestReq.Header.Set("x-ms-AzureResourceId", ResourceID) } - start := time.Now() + reqStart := time.Now() appRequestResp, err := HTTPClient.Do(appRequestReq) - reqElapsed := time.Since(start) + reqElapsed := time.Since(reqStart) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending apprequest %v metrics. duration:%v err:%q \n", len(osmRequestMetrics), reqElapsed, err.Error()) @@ -1013,9 +1013,9 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int appDependencyReq.Header.Set("x-ms-AzureResourceId", ResourceID) } - start := time.Now() + depStart := time.Now() appDependencyResp, err := HTTPClient.Do(appDependencyReq) - depElapsed := time.Since(start) + depElapsed := time.Since(depStart) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending appdependency %v metrics. duration:%v err:%q \n", len(appMapOsmDependencyMetrics), elapsed, err.Error()) From 8dfeedbe5602302464ec52962cafc6527e473e31 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 1 Feb 2021 19:01:59 -0800 Subject: [PATCH 071/175] changes --- source/plugins/go/src/oms.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index c92b69587..73518ed99 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -953,7 +953,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int reqElapsed := time.Since(reqStart) if err != nil { - message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending apprequest %v metrics. 
duration:%v err:%q \n", len(osmRequestMetrics), reqElapsed, err.Error()) + message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending apprequest %v metrics. duration:%v err:%q \n", len(appMapOsmRequestMetrics), reqElapsed, err.Error()) Log(message) UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) return output.FLB_RETRY From efd587c83c818425d03454c7ef5259acf5a305f6 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 2 Feb 2021 10:42:06 -0800 Subject: [PATCH 072/175] changes --- .../installer/scripts/tomlparser-osm-config.rb | 14 +++++++------- kubernetes/linux/main.sh | 12 ++++++------ 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 2b8303b94..e964cbcde 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -140,13 +140,13 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Set OSM namespaces as environment variable so that prometheus custom config parser can read it and add necessary fielddrops to avoid data duplication # of OSM metrics -promSettingsSharedfile = File.open("prom_config_shared_settings_env_var", "w") -if !promSettingsSharedfile.nil? - promSettingsSharedfile.write("export AZMON_OSM_METRIC_NAMESPACES=#{@osmMetricNamespaces}\n") - # Close file after writing all environment variables - promSettingsSharedfile.close - puts "config::Successfully created prom_config_shared_settings_env_var file for prometheus sidecar" -end +# promSettingsSharedfile = File.open("prom_config_shared_settings_env_var", "w") +# if !promSettingsSharedfile.nil? 
+# promSettingsSharedfile.write("export AZMON_OSM_METRIC_NAMESPACES=#{@osmMetricNamespaces}\n") +# # Close file after writing all environment variables +# promSettingsSharedfile.close +# puts "config::Successfully created prom_config_shared_settings_env_var file for prometheus sidecar" +# end # Write the telemetry to file, so that they can be set as environment variables telemetryFile = File.open("integration_osm_config_env_var", "w") diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index a0808dbc3..edea99bd1 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -231,12 +231,12 @@ if [ ! -e "/etc/config/kube.conf" ]; then source integration_osm_config_env_var #Sourcing prometheus side car config settings if it exists - if [ -e "prom_config_shared_settings_env_var" ]; then - cat prom_config_shared_settings_env_var | while read line; do - echo $line >> ~/.bashrc - done - source prom_config_shared_settings_env_var - fi + # if [ -e "prom_config_shared_settings_env_var" ]; then + # cat prom_config_shared_settings_env_var | while read line; do + # echo $line >> ~/.bashrc + # done + # source prom_config_shared_settings_env_var + # fi fi fi From 9ed7c3e9572e4ecef874205039a14e80a8358786 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 2 Feb 2021 10:56:51 -0800 Subject: [PATCH 073/175] moving parsers --- kubernetes/linux/main.sh | 53 ++++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 18 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index edea99bd1..341e578d7 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -221,24 +221,24 @@ fi #Parse the OSM configmap to set the right environment variables for metric collection settings #This needs to be done before the prometheus custom config map parsing since we have namespace duplication logic in place. -if [ ! 
-e "/etc/config/kube.conf" ]; then - if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb - - cat integration_osm_config_env_var | while read line; do - echo $line >> ~/.bashrc - done - source integration_osm_config_env_var - - #Sourcing prometheus side car config settings if it exists - # if [ -e "prom_config_shared_settings_env_var" ]; then - # cat prom_config_shared_settings_env_var | while read line; do - # echo $line >> ~/.bashrc - # done - # source prom_config_shared_settings_env_var - # fi - fi -fi +# if [ ! -e "/etc/config/kube.conf" ]; then +# if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then +# /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + +# cat integration_osm_config_env_var | while read line; do +# echo $line >> ~/.bashrc +# done +# source integration_osm_config_env_var + +# #Sourcing prometheus side car config settings if it exists +# # if [ -e "prom_config_shared_settings_env_var" ]; then +# # cat prom_config_shared_settings_env_var | while read line; do +# # echo $line >> ~/.bashrc +# # done +# # source prom_config_shared_settings_env_var +# # fi +# fi +# fi #Parse the prometheus configmap to create a file with new custom settings. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb @@ -288,7 +288,24 @@ cat config_metric_collection_env_var | while read line; do done source config_metric_collection_env_var +if [ ! 
-e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + cat integration_osm_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source integration_osm_config_env_var + + #Sourcing prometheus side car config settings if it exists + # if [ -e "prom_config_shared_settings_env_var" ]; then + # cat prom_config_shared_settings_env_var | while read line; do + # echo $line >> ~/.bashrc + # done + # source prom_config_shared_settings_env_var + # fi + fi +fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" From 2b3a17b8734a66992f9e7b98a5aa2b8df98458d5 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 2 Feb 2021 12:16:17 -0800 Subject: [PATCH 074/175] log json --- source/plugins/go/src/oms.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 73518ed99..f08173738 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -931,6 +931,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int SendException(message) return output.FLB_OK } + Log("AppMapOSMRequestMetrics-json:%v", osmRequestMetrics) //Post metrics data to LA appRequestReq, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(requestJsonBytes)) @@ -989,6 +990,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int DataItems: dependencyMetrics} dependencyJsonBytes, err := json.Marshal(osmDependencyMetrics) + Log("AppMapOSMDependencyMetrics-json:%v", osmDependencyMetrics) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling app dependencies json %q", err) From cfb53c3ee42bdebeec8c0235ad1ecdad4f5c9966 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 2 Feb 2021 
13:33:50 -0800 Subject: [PATCH 075/175] more logs --- build/linux/installer/scripts/tomlparser-osm-config.rb | 3 +-- source/plugins/go/src/oms.go | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index e964cbcde..fcec52507 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -21,8 +21,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"go_goroutines\"]" - +@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\", \envoy_cluster_upstream_rq_active\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time\", \"envoy_http_downstream_rq_xx\", \"envoy_cluster_downstream_rq\", \"envoy_http_no_route\", \"envoy_server_live\", \"envoy_server_memory_heap_size\", \"envoy_server_memory_physical_size\", \"envoy_server_memory_allocated\", \"envoy_cluster_external_upstream_rq_xx\"]" @scrapeInterval = "1m" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index f08173738..64da3a5ed 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -859,6 +859,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int DataItems: metrics} jsonBytes, err := json.Marshal(laTelegrafMetrics) + Log("laTelegrafMetrics-json:%v", laTelegrafMetrics) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling json %q", err) From 4f64c72851678d485a0d60dfedfc94f50ddd07e4 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 2 Feb 2021 13:54:07 -0800 Subject: [PATCH 076/175] bug fix --- 
build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index fcec52507..3b861997d 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -21,7 +21,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\", \envoy_cluster_upstream_rq_active\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time\", \"envoy_http_downstream_rq_xx\", \"envoy_cluster_downstream_rq\", \"envoy_http_no_route\", \"envoy_server_live\", \"envoy_server_memory_heap_size\", \"envoy_server_memory_physical_size\", \"envoy_server_memory_allocated\", \"envoy_cluster_external_upstream_rq_xx\"]" +@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_active\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time\", \"envoy_http_downstream_rq_xx\", \"envoy_cluster_downstream_rq\", \"envoy_http_no_route\", \"envoy_server_live\", \"envoy_server_memory_heap_size\", \"envoy_server_memory_physical_size\", \"envoy_server_memory_allocated\", \"envoy_cluster_external_upstream_rq_xx\"]" @scrapeInterval = "1m" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" From 9a869da9bb2fd58e92cf6c293f0f02729abfcb1a Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 2 Feb 2021 18:10:46 -0800 Subject: [PATCH 077/175] adding additional fields --- source/plugins/go/src/oms.go | 135 +++++++++++++++++++++++++---------- 1 file changed, 97 insertions(+), 38 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 
64da3a5ed..4c79af54b 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -3,7 +3,6 @@ package main import ( "bytes" "context" - "crypto/rand" "encoding/json" "fmt" "io" @@ -227,13 +226,42 @@ type laTelegrafMetric struct { } type appMapOsmRequestMetric struct { - CollectionTime string `json:"CollectionTime"` - OperationId string `json:"OperationId"` - ParentId string `json:"ParentId"` - AppRoleName string `json:"AppRoleName"` - DurationMs float64 `json:"DurationMs"` - Success bool `json:"Success"` - ItemCount int64 `json:"ItemCount"` + time string `json:"time"` + Id string `json:"Id"` + Source string `json:"Source"` + Name string `json:"Name"` + Url string `json:"Url"` + Success bool `json:"Success"` + ResultCode string `json:"ResultCode"` + DurationMs float64 `json:"DurationMs"` + PerformanceBucket string `json:"PerformanceBucket"` + Properties string `json:"Properties"` + Measurements string `json:"Measurements"` + OperationName string `json:"OperationName"` + OperationId string `json:"OperationId"` + ParentId string `json:"ParentId"` + SyntheticSource string `json:"SyntheticSource"` + SessionId string `json:"SessionId"` + UserId string `json:"UserId"` + UserAuthenticatedId string `json:"UserAuthenticatedId"` + UserAccountId string `json:"UserAccountId"` + AppVersion string `json:"AppVersion"` + AppRoleName string `json:"AppRoleName"` + AppRoleInstance string `json:"AppRoleInstance"` + ClientType string `json:"ClientType"` + ClientModel string `json:"ClientModel"` + ClientOS string `json:"ClientOS"` + ClientIP string `json:"ClientIP"` + ClientCity string `json:"ClientCity"` + ClientStateOrProvince string `json:"ClientStateOrProvince"` + ClientCountryOrRegion string `json:"ClientCountryOrRegion"` + ClientBrowser string `json:"ClientBrowser"` + ResourceGUID string `json:"ResourceGUID"` + IKey string `json:"IKey"` + SDKVersion string `json:"SDKVersion"` + ItemCount int64 `json:"ItemCount"` + ReferencedItemId string 
`json:"ReferencedItemId"` + ReferencedType string `json:"ReferencedType"` } type appMapOsmDependencyMetric struct { @@ -367,18 +395,18 @@ func createLogger() *log.Logger { } // newUUID generates a random UUID according to RFC 4122 -func newUUID() (string, error) { - uuid := make([]byte, 16) - n, err := io.ReadFull(rand.Reader, uuid) - if n != len(uuid) || err != nil { - return "", err - } - // variant bits; see section 4.1.1 - uuid[8] = uuid[8]&^0xc0 | 0x80 - // version 4 (pseudo-random); see section 4.1.3 - uuid[6] = uuid[6]&^0xf0 | 0x40 - return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil -} +// func newUUID() (string, error) { +// uuid := make([]byte, 16) +// n, err := io.ReadFull(rand.Reader, uuid) +// if n != len(uuid) || err != nil { +// return "", err +// } +// // variant bits; see section 4.1.1 +// uuid[8] = uuid[8]&^0xc0 | 0x80 +// // version 4 (pseudo-random); see section 4.1.3 +// uuid[6] = uuid[6]&^0xf0 | 0x40 +// return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil +// } func updateContainerImageNameMaps() { for ; true; <-ContainerImageNameRefreshTicker.C { @@ -753,26 +781,57 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri itemCount := int64(1) success := true durationMs := float64(1.0) - operationId, err := newUUID() - if err != nil { - Log("translateTelegrafMetrics::error while generating operationId GUID: %v\n", err) - } - Log("translateTelegrafMetrics::%s\n", operationId) - - id, err := newUUID() - if err != nil { - Log("translateTelegrafMetrics::error while generating id GUID: %v\n", err) - } + operationId, err := uuid.New().String() + // if err != nil { + // Log("translateTelegrafMetrics::error while generating operationId GUID: %v\n", err) + // } + // Log("translateTelegrafMetrics::%s\n", operationId) + + id, err := uuid.New().String() + // if err != nil { + // Log("translateTelegrafMetrics::error while generating id 
GUID: %v\n", err) + // } Log("translateTelegrafMetrics::%s\n", id) collectionTimeValue := m["timestamp"].(uint64) osmRequestMetric := appMapOsmRequestMetric{ - OperationId: fmt.Sprintf("%s", operationId), - ParentId: fmt.Sprintf("%s", id), - AppRoleName: fmt.Sprintf("%s", destinationAppName), - DurationMs: durationMs, - Success: success, - ItemCount: itemCount, - CollectionTime: time.Unix(int64(collectionTimeValue), 0).Format(time.RFC3339), + // Absolutely needed metrics for topology generation for AppMap + time: time.Unix(int64(collectionTimeValue), 0).Format(time.RFC3339), + OperationId: fmt.Sprintf("%s", operationId), + ParentId: fmt.Sprintf("%s", id), + AppRoleName: fmt.Sprintf("%s", destinationAppName), + DurationMs: durationMs, + Success: success, + ItemCount: itemCount, + //metrics to get ingestion working + Id: fmt.Sprintf("%s", "id"), + Source: fmt.Sprintf("%s", "Source"), + Name: fmt.Sprintf("%s", "Name"), + Url: fmt.Sprintf("%s", "Url"), + ResultCode: fmt.Sprintf("%s", "200"), + PerformanceBucket: fmt.Sprintf("%s", "PerformanceBucket"), + Properties: fmt.Sprintf("%s", ""), + Measurements: fmt.Sprintf("%s", ""), + OperationName: fmt.Sprintf("%s", "GET"), + SyntheticSource: fmt.Sprintf("%s", "SyntheticSource"), + SessionId: fmt.Sprintf("%s", "SessionId"), + UserId: fmt.Sprintf("%s", "UserId"), + UserAuthenticatedId: fmt.Sprintf("%s", "UserAuthenticatedId"), + UserAccountId: fmt.Sprintf("%s", "UserAccountId"), + AppVersion: fmt.Sprintf("%s", "v1"), + AppRoleInstance: fmt.Sprintf("%s", "AppRoleInstance"), + ClientType: fmt.Sprintf("%s", "ClientType"), + ClientModel: fmt.Sprintf("%s", "ClientModel"), + ClientOS: fmt.Sprintf("%s", "ClientOS"), + ClientIP: fmt.Sprintf("%s", "ClientIP"), + ClientCity: fmt.Sprintf("%s", "ClientCity"), + ClientStateOrProvince: fmt.Sprintf("%s", "ClientStateOrProvince"), + ClientCountryOrRegion: fmt.Sprintf("%s", "ClientCountryOrRegion"), + ClientBrowser: fmt.Sprintf("%s", "ClientBrowser"), + ResourceGUID: fmt.Sprintf("%s", 
"ResourceGUID"), + IKey: fmt.Sprintf("%s", "IKey"), + SDKVersion: fmt.Sprintf("%s", "SDKVersion"), + ReferencedItemId: fmt.Sprintf("%s", "ReferencedItemId"), + ReferencedType: fmt.Sprintf("%s", "ReferencedType"), // Computer: Computer, //this is the collection agent's computer name, not necessarily to which computer the metric applies to } @@ -859,7 +918,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int DataItems: metrics} jsonBytes, err := json.Marshal(laTelegrafMetrics) - Log("laTelegrafMetrics-json:%v", laTelegrafMetrics) + //Log("laTelegrafMetrics-json:%v", laTelegrafMetrics) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling json %q", err) From 3612cb552a71cd51418be8c3f2da96bd6e383812 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 2 Feb 2021 18:27:44 -0800 Subject: [PATCH 078/175] fixing bugs --- source/plugins/go/src/oms.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 4c79af54b..d79959dec 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -781,13 +781,13 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri itemCount := int64(1) success := true durationMs := float64(1.0) - operationId, err := uuid.New().String() + operationId := uuid.New().String() // if err != nil { // Log("translateTelegrafMetrics::error while generating operationId GUID: %v\n", err) // } // Log("translateTelegrafMetrics::%s\n", operationId) - id, err := uuid.New().String() + id := uuid.New().String() // if err != nil { // Log("translateTelegrafMetrics::error while generating id GUID: %v\n", err) // } From 06b26576410eaf354b7b749f4586ef014a8a7da8 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Feb 2021 09:41:57 -0800 Subject: [PATCH 079/175] adding some more logs --- source/plugins/go/src/oms.go | 6 ++++-- 1 file changed, 4 
insertions(+), 2 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index d79959dec..1fe82141f 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1034,7 +1034,8 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int appRequestNumMetrics := len(appMapOsmRequestMetrics) UpdateNumTelegrafMetricsSentTelemetry(appRequestNumMetrics, 0, 0) - Log("PostTelegrafMetricsToLA::Info:AppRequests:Successfully flushed %v records in %v", appRequestNumMetrics, reqElapsed) + Log("PostTelegrafMetricsToLA::Info:AppRequests:Http Request: %v", appRequestReq) + Log("PostTelegrafMetricsToLA::Info:AppRequests:Successfully flushed %v records in %v with status code %v", appRequestNumMetrics, reqElapsed, appRequestResp.StatusCode) // AppMap Dependencies var dependencyMetrics []appMapOsmDependencyMetric @@ -1100,7 +1101,8 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int appDependencyNumMetrics := len(appMapOsmDependencyMetrics) UpdateNumTelegrafMetricsSentTelemetry(appDependencyNumMetrics, 0, 0) - Log("PostTelegrafMetricsToLA::Info:AppDependency:Successfully flushed %v records in %v", appDependencyNumMetrics, depElapsed) + Log("PostTelegrafMetricsToLA::Info:AppDependency:Http Request: %v", appDependencyReq) + Log("PostTelegrafMetricsToLA::Info:AppDependency:Successfully flushed %v records in %v with status code - %v", appDependencyNumMetrics, depElapsed, appDependencyResp.StatusCode) return output.FLB_OK } From 491821ad096f9fe09e580be38ed28fd2f04f9027 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Feb 2021 10:38:57 -0800 Subject: [PATCH 080/175] Adding req for telegraf metric --- source/plugins/go/src/oms.go | 1 + 1 file changed, 1 insertion(+) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 1fe82141f..6f9459e75 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -968,6 +968,7 @@ func 
PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int numMetrics := len(laMetrics) UpdateNumTelegrafMetricsSentTelemetry(numMetrics, 0, 0) + Log("PostTelegrafMetricsToLA::Info:AppRequests:Http Request: %v", req) Log("PostTelegrafMetricsToLA::Info:Successfully flushed %v records in %v", numMetrics, elapsed) // AppMap Requests From 3ce19dd2ff1e42fe764c3887d63d5507ccdc7b48 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Feb 2021 11:03:10 -0800 Subject: [PATCH 081/175] more logs --- source/plugins/go/src/oms.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 6f9459e75..10de1d9fc 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -929,7 +929,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int //Post metrics data to LA req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(jsonBytes)) - + Log("LA request json bytes: %v", jsonBytes) //req.URL.Query().Add("api-version","2016-04-01") //set headers @@ -968,7 +968,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int numMetrics := len(laMetrics) UpdateNumTelegrafMetricsSentTelemetry(numMetrics, 0, 0) - Log("PostTelegrafMetricsToLA::Info:AppRequests:Http Request: %v", req) + Log("PostTelegrafMetricsToLA::Info:LArequests:Http Request: %v", req) Log("PostTelegrafMetricsToLA::Info:Successfully flushed %v records in %v", numMetrics, elapsed) // AppMap Requests @@ -985,6 +985,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int DataItems: requestMetrics} requestJsonBytes, err := json.Marshal(osmRequestMetrics) + Log("app request json bytes: %v", requestJsonBytes) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling app requests json %q", err) @@ -1053,6 +1054,7 @@ func PostTelegrafMetricsToLA(telegrafRecords 
[]map[interface{}]interface{}) int dependencyJsonBytes, err := json.Marshal(osmDependencyMetrics) Log("AppMapOSMDependencyMetrics-json:%v", osmDependencyMetrics) + Log("app dependency json bytes: %v", dependencyJsonBytes) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling app dependencies json %q", err) From 54eabe911ccc5ea8ae5a9e3cb457f21be19cf1f0 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Feb 2021 14:31:45 -0800 Subject: [PATCH 082/175] commenting byte logs --- source/plugins/go/src/oms.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 10de1d9fc..2fadfba9b 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -929,7 +929,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int //Post metrics data to LA req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(jsonBytes)) - Log("LA request json bytes: %v", jsonBytes) + //Log("LA request json bytes: %v", jsonBytes) //req.URL.Query().Add("api-version","2016-04-01") //set headers @@ -985,7 +985,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int DataItems: requestMetrics} requestJsonBytes, err := json.Marshal(osmRequestMetrics) - Log("app request json bytes: %v", requestJsonBytes) + //Log("app request json bytes: %v", requestJsonBytes) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling app requests json %q", err) @@ -1054,7 +1054,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int dependencyJsonBytes, err := json.Marshal(osmDependencyMetrics) Log("AppMapOSMDependencyMetrics-json:%v", osmDependencyMetrics) - Log("app dependency json bytes: %v", dependencyJsonBytes) + //Log("app dependency json bytes: %v", dependencyJsonBytes) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when 
marshalling app dependencies json %q", err) From 7ba1aec480a5d349b75479bce71973d8d32fd231 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Feb 2021 16:05:44 -0800 Subject: [PATCH 083/175] changing data in request --- source/plugins/go/src/oms.go | 64 ++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 2fadfba9b..1491a8946 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -796,42 +796,42 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri osmRequestMetric := appMapOsmRequestMetric{ // Absolutely needed metrics for topology generation for AppMap time: time.Unix(int64(collectionTimeValue), 0).Format(time.RFC3339), - OperationId: fmt.Sprintf("%s", operationId), - ParentId: fmt.Sprintf("%s", id), + OperationId: fmt.Sprintf("%s", "0d0316964a57404da5f5619287ff2f62"), + ParentId: fmt.Sprintf("%s", "fba578d218224d26a4ca83ba13c354e9"), AppRoleName: fmt.Sprintf("%s", destinationAppName), - DurationMs: durationMs, + DurationMs: 898.42, Success: success, - ItemCount: itemCount, + ItemCount: 42, //metrics to get ingestion working - Id: fmt.Sprintf("%s", "id"), - Source: fmt.Sprintf("%s", "Source"), - Name: fmt.Sprintf("%s", "Name"), - Url: fmt.Sprintf("%s", "Url"), + Id: fmt.Sprintf("%s", "8be927b9-0bde-4357-87ee-73c13b6f6a05"), + Source: fmt.Sprintf("%s", "Application"), + Name: fmt.Sprintf("%s", "TestData-Request-DataGen"), + Url: fmt.Sprintf("%s", "https://portal.azure.com"), ResultCode: fmt.Sprintf("%s", "200"), - PerformanceBucket: fmt.Sprintf("%s", "PerformanceBucket"), - Properties: fmt.Sprintf("%s", ""), - Measurements: fmt.Sprintf("%s", ""), - OperationName: fmt.Sprintf("%s", "GET"), - SyntheticSource: fmt.Sprintf("%s", "SyntheticSource"), - SessionId: fmt.Sprintf("%s", "SessionId"), - UserId: fmt.Sprintf("%s", "UserId"), - UserAuthenticatedId: fmt.Sprintf("%s", 
"UserAuthenticatedId"), - UserAccountId: fmt.Sprintf("%s", "UserAccountId"), - AppVersion: fmt.Sprintf("%s", "v1"), - AppRoleInstance: fmt.Sprintf("%s", "AppRoleInstance"), - ClientType: fmt.Sprintf("%s", "ClientType"), - ClientModel: fmt.Sprintf("%s", "ClientModel"), - ClientOS: fmt.Sprintf("%s", "ClientOS"), - ClientIP: fmt.Sprintf("%s", "ClientIP"), - ClientCity: fmt.Sprintf("%s", "ClientCity"), - ClientStateOrProvince: fmt.Sprintf("%s", "ClientStateOrProvince"), - ClientCountryOrRegion: fmt.Sprintf("%s", "ClientCountryOrRegion"), - ClientBrowser: fmt.Sprintf("%s", "ClientBrowser"), - ResourceGUID: fmt.Sprintf("%s", "ResourceGUID"), - IKey: fmt.Sprintf("%s", "IKey"), - SDKVersion: fmt.Sprintf("%s", "SDKVersion"), - ReferencedItemId: fmt.Sprintf("%s", "ReferencedItemId"), - ReferencedType: fmt.Sprintf("%s", "ReferencedType"), + PerformanceBucket: fmt.Sprintf("%s", "500ms-1sec"), + Properties: fmt.Sprintf("%s", { "DeploymentId":"523a92fea186461581efca83b7b66a0d", "Stamp":"Breeze-INT-SCUS" }), + Measurements: fmt.Sprintf("%s", { "AvailableMemory": 42.3 }), + OperationName: fmt.Sprintf("%s", "POST /v2/passthrough"), + SyntheticSource: fmt.Sprintf("%s", "Windows"), + SessionId: fmt.Sprintf("%s", "e357297720214cdc818565f89cfad359"), + UserId: fmt.Sprintf("%s", "5bfb5187ff9742fbaec5b19dd7217f40"), + UserAuthenticatedId: fmt.Sprintf("%s", "somebody@microsoft.com"), + UserAccountId: fmt.Sprintf("%s", "e357297720214cdc818565f89cfad359"), + AppVersion: fmt.Sprintf("%s", "4.2-alpha"), + AppRoleInstance: fmt.Sprintf("%s", "Breeze_IN_42"), + ClientType: fmt.Sprintf("%s", "PC"), + ClientModel: fmt.Sprintf("%s", "Other"), + ClientOS: fmt.Sprintf("%s", "Windows 7"), + ClientIP: fmt.Sprintf("%s", "0.0.0.0"), + ClientCity: fmt.Sprintf("%s", "Sydney"), + ClientStateOrProvince: fmt.Sprintf("%s", "New South Wales"), + ClientCountryOrRegion: fmt.Sprintf("%s", "Australia"), + ClientBrowser: fmt.Sprintf("%s", "Internet Explorer 9.0"), + ResourceGUID: fmt.Sprintf("%s", 
"d4e6868c-02e8-41d2-a09d-bbb5ae35af5c"), + IKey: fmt.Sprintf("%s", "0539013c-a321-46fd-b831-1cc16729b449"), + SDKVersion: fmt.Sprintf("%s", "dotnet:2.2.0-54037"), + ReferencedItemId: fmt.Sprintf("%s", "905812ce-48c3-44ee-ab93-33e8768f59f9"), + ReferencedType: fmt.Sprintf("%s", "IoTRequests"), // Computer: Computer, //this is the collection agent's computer name, not necessarily to which computer the metric applies to } From 14c0c6d3359c2679a9ae864a2132505301b8121c Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Feb 2021 16:12:04 -0800 Subject: [PATCH 084/175] make dynamic field changes --- source/plugins/go/src/oms.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 1491a8946..7c95ff0a8 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -773,7 +773,26 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri laMetrics = append(laMetrics, &laMetric) // OSM metric population for AppMap + Measurements: fmt.Sprintf("%s", { "AvailableMemory": 42.3 }), metricName := fmt.Sprintf("%s", k) + propertyMap := make(map[string]string) + propertyMap[fmt.Sprintf("DeploymentId")] = "523a92fea186461581efca83b7b66a0d" + propertyMap[fmt.Sprintf("Stamp")] = "Breeze-INT-SCUS" + propertiesJson, err := json.Marshal(&propertyMap) + + if err != nil { + return nil, nil, nil, err + } + + measurementsMap := make(map[string]string) + measurementsMap[fmt.Sprintf("AvailableMemory")] = "423" + measurementsJson, err := json.Marshal(&measurementsMap) + + if err != nil { + return nil, nil, nil, err + } + + if (metricName == "envoy_cluster_upstream_rq_active") && (strings.HasPrefix(metricNamespace, "container.azm.ms.osm")) { if fv > 0 { appName := tagMap["app"] @@ -809,8 +828,8 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri Url: fmt.Sprintf("%s", "https://portal.azure.com"), 
ResultCode: fmt.Sprintf("%s", "200"), PerformanceBucket: fmt.Sprintf("%s", "500ms-1sec"), - Properties: fmt.Sprintf("%s", { "DeploymentId":"523a92fea186461581efca83b7b66a0d", "Stamp":"Breeze-INT-SCUS" }), - Measurements: fmt.Sprintf("%s", { "AvailableMemory": 42.3 }), + Properties: fmt.Sprintf("%s", propertiesJson), + Measurements: fmt.Sprintf("%s", measurementsJson), OperationName: fmt.Sprintf("%s", "POST /v2/passthrough"), SyntheticSource: fmt.Sprintf("%s", "Windows"), SessionId: fmt.Sprintf("%s", "e357297720214cdc818565f89cfad359"), From d7e5b65c6c89910c528ae696ae7401c4029b6111 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Feb 2021 16:12:42 -0800 Subject: [PATCH 085/175] bug fix --- source/plugins/go/src/oms.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 7c95ff0a8..09eedd2f8 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -773,13 +773,12 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri laMetrics = append(laMetrics, &laMetric) // OSM metric population for AppMap - Measurements: fmt.Sprintf("%s", { "AvailableMemory": 42.3 }), metricName := fmt.Sprintf("%s", k) propertyMap := make(map[string]string) propertyMap[fmt.Sprintf("DeploymentId")] = "523a92fea186461581efca83b7b66a0d" propertyMap[fmt.Sprintf("Stamp")] = "Breeze-INT-SCUS" propertiesJson, err := json.Marshal(&propertyMap) - + if err != nil { return nil, nil, nil, err } @@ -787,12 +786,11 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri measurementsMap := make(map[string]string) measurementsMap[fmt.Sprintf("AvailableMemory")] = "423" measurementsJson, err := json.Marshal(&measurementsMap) - + if err != nil { return nil, nil, nil, err } - if (metricName == "envoy_cluster_upstream_rq_active") && (strings.HasPrefix(metricNamespace, "container.azm.ms.osm")) { if fv > 0 { appName := tagMap["app"] 
From 15be8cd08f245102724f4a49da22dab12e78ecc5 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Feb 2021 17:09:18 -0800 Subject: [PATCH 086/175] adding log type in the header --- source/plugins/go/src/oms.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 09eedd2f8..b3ff18ece 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1020,6 +1020,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int //set headers appRequestReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("User-Agent", userAgent) + appRequestReq.Header.Set("Log-Type", AppRequestsDataType) appRequestReqID := uuid.New().String() appRequestReq.Header.Set("X-Request-ID", appRequestReqID) @@ -1088,6 +1089,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int //set headers appDependencyReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) appDependencyReq.Header.Set("User-Agent", userAgent) + appRequestReq.Header.Set("Log-Type", AppRequestsDataType) appDependencyReqID := uuid.New().String() appDependencyReq.Header.Set("X-Request-ID", appDependencyReqID) From eaa6136c06b488a0ddfea25f8fe77625b562c7f5 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Feb 2021 17:57:20 -0800 Subject: [PATCH 087/175] adding al headers --- source/plugins/go/src/oms.go | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index b3ff18ece..3e4ec6b5c 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1021,6 +1021,16 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int appRequestReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("User-Agent", userAgent) appRequestReq.Header.Set("Log-Type", AppRequestsDataType) 
+ appRequestReq.Header.Set("ocp-workspace-id", WorkspaceID) + appRequestReq.Header.Set("ocp-is-dynamic-data-type", "False") + appRequestReq.Header.Set("ocp-intelligence-pack-name", "Azure") + appRequestReq.Header.Set("ocp-json-nesting-resolution", "records") + appRequestReq.Header.Set("time-generated-field", time.Now().Format(time.RFC3339)) + appRequestReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) + appRequestReq.Header.Set("x-ms-OboLocation", "North Europe") + appRequestReq.Header.Set("x-ms-ServiceIdentity", "ApplicationInsights") + // appRequestReq.Header.Set("x-ms-ResourceLocation", "records") + appRequestReqID := uuid.New().String() appRequestReq.Header.Set("X-Request-ID", appRequestReqID) @@ -1089,7 +1099,15 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int //set headers appDependencyReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) appDependencyReq.Header.Set("User-Agent", userAgent) - appRequestReq.Header.Set("Log-Type", AppRequestsDataType) + appRequestReq.Header.Set("Log-Type", AppDependenciesDataType) + appRequestReq.Header.Set("ocp-workspace-id", WorkspaceID) + appRequestReq.Header.Set("ocp-is-dynamic-data-type", "False") + appRequestReq.Header.Set("ocp-intelligence-pack-name", "Azure") + appRequestReq.Header.Set("ocp-json-nesting-resolution", "records") + appRequestReq.Header.Set("time-generated-field", time.Now().Format(time.RFC3339)) + appRequestReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) + appRequestReq.Header.Set("x-ms-OboLocation", "North Europe") + appRequestReq.Header.Set("x-ms-ServiceIdentity", "ApplicationInsights") appDependencyReqID := uuid.New().String() appDependencyReq.Header.Set("X-Request-ID", appDependencyReqID) From b620a25b796c3723e61b93225febf1444e162f59 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Feb 2021 12:46:43 -0800 Subject: [PATCH 088/175] ODS changes --- source/plugins/go/src/oms.go | 23 
+++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 3e4ec6b5c..96075d845 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1015,7 +1015,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int //Post metrics data to LA appRequestReq, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(requestJsonBytes)) - //req.URL.Query().Add("api-version","2016-04-01") + appRequestReq.URL.Query().Add("api-version", "2016-04-01") //set headers appRequestReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) @@ -1029,6 +1029,9 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int appRequestReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("x-ms-OboLocation", "North Europe") appRequestReq.Header.Set("x-ms-ServiceIdentity", "ApplicationInsights") + appRequestReq.Header.Set("Content-Type", "application/json") + // appRequestReq.Header.Set("Content-Encoding", "gzip") + // appRequestReq.Header.Set("x-ms-ResourceLocation", "records") appRequestReqID := uuid.New().String() @@ -1099,15 +1102,15 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int //set headers appDependencyReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) appDependencyReq.Header.Set("User-Agent", userAgent) - appRequestReq.Header.Set("Log-Type", AppDependenciesDataType) - appRequestReq.Header.Set("ocp-workspace-id", WorkspaceID) - appRequestReq.Header.Set("ocp-is-dynamic-data-type", "False") - appRequestReq.Header.Set("ocp-intelligence-pack-name", "Azure") - appRequestReq.Header.Set("ocp-json-nesting-resolution", "records") - appRequestReq.Header.Set("time-generated-field", time.Now().Format(time.RFC3339)) - appRequestReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) - appRequestReq.Header.Set("x-ms-OboLocation", "North 
Europe") - appRequestReq.Header.Set("x-ms-ServiceIdentity", "ApplicationInsights") + appDependencyReq.Header.Set("Log-Type", AppDependenciesDataType) + appDependencyReq.Header.Set("ocp-workspace-id", WorkspaceID) + appDependencyReq.Header.Set("ocp-is-dynamic-data-type", "False") + appDependencyReq.Header.Set("ocp-intelligence-pack-name", "Azure") + appDependencyReq.Header.Set("ocp-json-nesting-resolution", "records") + appDependencyReq.Header.Set("time-generated-field", time.Now().Format(time.RFC3339)) + appDependencyReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) + appDependencyReq.Header.Set("x-ms-OboLocation", "North Europe") + appDependencyReq.Header.Set("x-ms-ServiceIdentity", "ApplicationInsights") appDependencyReqID := uuid.New().String() appDependencyReq.Header.Set("X-Request-ID", appDependencyReqID) From 9f2e9bd6bf603f208551dc03271134387c41b044 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Feb 2021 14:10:22 -0800 Subject: [PATCH 089/175] adding api version --- source/plugins/go/src/oms.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 96075d845..3328cbc57 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1013,9 +1013,9 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int Log("AppMapOSMRequestMetrics-json:%v", osmRequestMetrics) //Post metrics data to LA - appRequestReq, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(requestJsonBytes)) + appRequestReq, _ := http.NewRequest("POST", OMSEndpoint+"?api-version=2016-04-01", bytes.NewBuffer(requestJsonBytes)) - appRequestReq.URL.Query().Add("api-version", "2016-04-01") + //appRequestReq.URL.Query().Add("api-version", "2016-04-01") //set headers appRequestReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) From 6bea93738aa706eabbaca62d08296531bc2bb52f Mon Sep 17 00:00:00 2001 From: Rashmi Mysore 
Chandrashekar Date: Thu, 4 Feb 2021 14:57:43 -0800 Subject: [PATCH 090/175] modifying app dependency --- source/plugins/go/src/oms.go | 100 ++++++++++++++++++++++++++++------- 1 file changed, 80 insertions(+), 20 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 3328cbc57..6c624f69c 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -265,14 +265,43 @@ type appMapOsmRequestMetric struct { } type appMapOsmDependencyMetric struct { - CollectionTime string `json:"CollectionTime"` - OperationId string `json:"OperationId"` - Id string `json:"Id"` - Target string `json:"Target"` - AppRoleName string `json:"AppRoleName"` - DurationMs float64 `json:"DurationMs"` - Success bool `json:"Success"` - ItemCount int64 `json:"ItemCount"` + time string `json:"time"` + Id string `json:"Id"` + Target string `json:"Target"` + DependencyType string `json:"DependencyType"` + Name string `json:"Name"` + Data string `json:"Data"` + Success bool `json:"Success"` + ResultCode string `json:"ResultCode"` + DurationMs float64 `json:"DurationMs"` + PerformanceBucket string `json:"PerformanceBucket"` + Properties string `json:"Properties"` + Measurements string `json:"Measurements"` + OperationName string `json:"OperationName"` + OperationId string `json:"OperationId"` + ParentId string `json:"ParentId"` + SyntheticSource string `json:"SyntheticSource"` + SessionId string `json:"SessionId"` + UserId string `json:"UserId"` + UserAuthenticatedId string `json:"UserAuthenticatedId"` + UserAccountId string `json:"UserAccountId"` + AppVersion string `json:"AppVersion"` + AppRoleName string `json:"AppRoleName"` + AppRoleInstance string `json:"AppRoleInstance"` + ClientType string `json:"ClientType"` + ClientModel string `json:"ClientModel"` + ClientOS string `json:"ClientOS"` + ClientIP string `json:"ClientIP"` + ClientCity string `json:"ClientCity"` + ClientStateOrProvince string `json:"ClientStateOrProvince"` + 
ClientCountryOrRegion string `json:"ClientCountryOrRegion"` + ClientBrowser string `json:"ClientBrowser"` + ResourceGUID string `json:"ResourceGUID"` + IKey string `json:"IKey"` + SDKVersion string `json:"SDKVersion"` + ItemCount int64 `json:"ItemCount"` + ReferencedItemId string `json:"ReferencedItemId"` + ReferencedType string `json:"ReferencedType"` } // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point @@ -813,8 +842,8 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri osmRequestMetric := appMapOsmRequestMetric{ // Absolutely needed metrics for topology generation for AppMap time: time.Unix(int64(collectionTimeValue), 0).Format(time.RFC3339), - OperationId: fmt.Sprintf("%s", "0d0316964a57404da5f5619287ff2f62"), - ParentId: fmt.Sprintf("%s", "fba578d218224d26a4ca83ba13c354e9"), + OperationId: fmt.Sprintf("%s", operationId), + ParentId: fmt.Sprintf("%s", id), AppRoleName: fmt.Sprintf("%s", destinationAppName), DurationMs: 898.42, Success: success, @@ -856,15 +885,45 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri appMapOsmRequestMetrics = append(appMapOsmRequestMetrics, &osmRequestMetric) osmDependencyMetric := appMapOsmDependencyMetric{ - OperationId: fmt.Sprintf("%s", operationId), - Id: fmt.Sprintf("%s", id), - AppRoleName: fmt.Sprintf("%s", appName), - Target: fmt.Sprintf("%s", destinationAppName), - DurationMs: durationMs, - Success: success, - ItemCount: itemCount, - CollectionTime: time.Unix(int64(collectionTimeValue), 0).Format(time.RFC3339), - // Computer: Computer, //this is the collection agent's computer name, not necessarily to which computer the metric applies to + // Absolutely needed metrics for topology generation for AppMap + time: time.Unix(int64(collectionTimeValue), 0).Format(time.RFC3339), + Id: fmt.Sprintf("%s", id), + Target: fmt.Sprintf("%s", destinationAppName), + Success: success, + DurationMs: 898.42, + OperationId: 
fmt.Sprintf("%s", operationId), + AppRoleName: fmt.Sprintf("%s", appName), + ItemCount: itemCount, + //metrics to get ingestion working + DependencyType: fmt.Sprintf("%s", "Ajax"), + Name: fmt.Sprintf("%s", "TestData-Request-DataGen"), + Data: fmt.Sprintf("%s", "GET https://n9440-fpj.gmbeelopm.com/HhjmlogpEhiLLL/ECO//GhoppnaBeAelhaekm/3944-40-42J92:22:19.750D/MehgKepmpnlegoDboghnMaedd"), + ResultCode: fmt.Sprintf("%s", "200"), + PerformanceBucket: fmt.Sprintf("%s", "500ms-1sec"), + Properties: fmt.Sprintf("%s", propertiesJson), + Measurements: fmt.Sprintf("%s", measurementsJson), + OperationName: fmt.Sprintf("%s", "POST /v2/passthrough"), + ParentId: fmt.Sprintf("%s", "b1bb1e27-4204-096e-9e89-1f1dfac718fc"), + SyntheticSource: fmt.Sprintf("%s", "Windows"), + SessionId: fmt.Sprintf("%s", "e357297720214cdc818565f89cfad359"), + UserId: fmt.Sprintf("%s", "5bfb5187ff9742fbaec5b19dd7217f40"), + UserAuthenticatedId: fmt.Sprintf("%s", "somebody@microsoft.com"), + UserAccountId: fmt.Sprintf("%s", "e357297720214cdc818565f89cfad359"), + AppVersion: fmt.Sprintf("%s", "4.2-alpha"), + AppRoleInstance: fmt.Sprintf("%s", "Breeze_IN_42"), + ClientType: fmt.Sprintf("%s", "PC"), + ClientModel: fmt.Sprintf("%s", "Other"), + ClientOS: fmt.Sprintf("%s", "Windows 7"), + ClientIP: fmt.Sprintf("%s", "0.0.0.0"), + ClientCity: fmt.Sprintf("%s", "Sydney"), + ClientStateOrProvince: fmt.Sprintf("%s", "New South Wales"), + ClientCountryOrRegion: fmt.Sprintf("%s", "Australia"), + ClientBrowser: fmt.Sprintf("%s", "Internet Explorer 9.0"), + ResourceGUID: fmt.Sprintf("%s", "d4e6868c-02e8-41d2-a09d-bbb5ae35af5c"), + IKey: fmt.Sprintf("%s", "0539013c-a321-46fd-b831-1cc16729b449"), + SDKVersion: fmt.Sprintf("%s", "dotnet:2.2.0-54037"), + ReferencedItemId: fmt.Sprintf("%s", "905812ce-48c3-44ee-ab93-33e8768f59f9"), + ReferencedType: fmt.Sprintf("%s", "IoTRequests"), } Log("osm dependency metric:%v", osmDependencyMetric) @@ -1095,7 +1154,7 @@ func PostTelegrafMetricsToLA(telegrafRecords 
[]map[interface{}]interface{}) int } //Post metrics data to LA - appDependencyReq, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(dependencyJsonBytes)) + appDependencyReq, _ := http.NewRequest("POST", OMSEndpoint+"?api-version=2016-04-01", bytes.NewBuffer(dependencyJsonBytes)) //req.URL.Query().Add("api-version","2016-04-01") @@ -1111,6 +1170,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int appDependencyReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) appDependencyReq.Header.Set("x-ms-OboLocation", "North Europe") appDependencyReq.Header.Set("x-ms-ServiceIdentity", "ApplicationInsights") + appDependencyReq.Header.Set("Content-Type", "application/json") appDependencyReqID := uuid.New().String() appDependencyReq.Header.Set("X-Request-ID", appDependencyReqID) From 743c967dc58c9395cbdd5f17e8c678b780a605e9 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Feb 2021 14:59:31 -0800 Subject: [PATCH 091/175] bug fix --- source/plugins/go/src/oms.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 6c624f69c..72c8c8f07 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -826,7 +826,7 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri destinationAppName := tagMap["envoy_cluster_name"] itemCount := int64(1) success := true - durationMs := float64(1.0) + // durationMs := float64(1.0) operationId := uuid.New().String() // if err != nil { // Log("translateTelegrafMetrics::error while generating operationId GUID: %v\n", err) From 1710d5888c335ef0ed205dd0aae3ce37c75603ad Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Feb 2021 15:28:04 -0800 Subject: [PATCH 092/175] changes --- source/plugins/go/src/oms.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/source/plugins/go/src/oms.go 
b/source/plugins/go/src/oms.go index 72c8c8f07..771603ae1 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -318,9 +318,9 @@ type AppMapOsmRequestBlob struct { } type AppMapOsmDependencyBlob struct { - DataType string `json:"DataType"` - IPName string `json:"IPName"` - DataItems []appMapOsmDependencyMetric `json:"DataItems"` + DataType string `json:"DataType"` + IPName string `json:"IPName"` + records []appMapOsmDependencyMetric `json:"DataItems"` } // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point @@ -1083,7 +1083,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int appRequestReq.Header.Set("ocp-workspace-id", WorkspaceID) appRequestReq.Header.Set("ocp-is-dynamic-data-type", "False") appRequestReq.Header.Set("ocp-intelligence-pack-name", "Azure") - appRequestReq.Header.Set("ocp-json-nesting-resolution", "records") + appRequestReq.Header.Set("ocp-json-nesting-resolution", "DataItems") appRequestReq.Header.Set("time-generated-field", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("x-ms-OboLocation", "North Europe") @@ -1138,9 +1138,9 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } osmDependencyMetrics := AppMapOsmDependencyBlob{ - DataType: AppDependenciesDataType, - IPName: "LogManagement", - DataItems: dependencyMetrics} + DataType: AppDependenciesDataType, + IPName: "LogManagement", + records: dependencyMetrics} dependencyJsonBytes, err := json.Marshal(osmDependencyMetrics) Log("AppMapOSMDependencyMetrics-json:%v", osmDependencyMetrics) From b5cb1ca7a24559d7b51acc1acc978bdd628fd4d8 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Feb 2021 18:51:49 -0800 Subject: [PATCH 093/175] changing to generic property bag --- source/plugins/go/src/oms.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) 
diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 771603ae1..770ea1add 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1079,11 +1079,11 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int //set headers appRequestReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("User-Agent", userAgent) - appRequestReq.Header.Set("Log-Type", AppRequestsDataType) + // appRequestReq.Header.Set("Log-Type", AppRequestsDataType) appRequestReq.Header.Set("ocp-workspace-id", WorkspaceID) appRequestReq.Header.Set("ocp-is-dynamic-data-type", "False") appRequestReq.Header.Set("ocp-intelligence-pack-name", "Azure") - appRequestReq.Header.Set("ocp-json-nesting-resolution", "DataItems") + //appRequestReq.Header.Set("ocp-json-nesting-resolution", "DataItems") appRequestReq.Header.Set("time-generated-field", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("x-ms-OboLocation", "North Europe") From bb274cb4a1b0e258a82050c3566871f08d151ded Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Feb 2021 18:56:31 -0800 Subject: [PATCH 094/175] addng header back --- source/plugins/go/src/oms.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 770ea1add..aec32e4d3 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1083,7 +1083,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int appRequestReq.Header.Set("ocp-workspace-id", WorkspaceID) appRequestReq.Header.Set("ocp-is-dynamic-data-type", "False") appRequestReq.Header.Set("ocp-intelligence-pack-name", "Azure") - //appRequestReq.Header.Set("ocp-json-nesting-resolution", "DataItems") + appRequestReq.Header.Set("ocp-json-nesting-resolution", "DataItems") 
appRequestReq.Header.Set("time-generated-field", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("x-ms-OboLocation", "North Europe") From 0a76b20fae5f561a457210ed5832a3f1c8ca4c58 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Feb 2021 19:27:35 -0800 Subject: [PATCH 095/175] test --- source/plugins/go/src/oms.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index aec32e4d3..4c13d876e 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1056,9 +1056,9 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } osmRequestMetrics := AppMapOsmRequestBlob{ - DataType: AppRequestsDataType, - IPName: "LogManagement", - DataItems: requestMetrics} + DataType: AppRequestsDataType, + IPName: "LogManagement", + records: requestMetrics} requestJsonBytes, err := json.Marshal(osmRequestMetrics) //Log("app request json bytes: %v", requestJsonBytes) @@ -1072,18 +1072,19 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int Log("AppMapOSMRequestMetrics-json:%v", osmRequestMetrics) //Post metrics data to LA - appRequestReq, _ := http.NewRequest("POST", OMSEndpoint+"?api-version=2016-04-01", bytes.NewBuffer(requestJsonBytes)) + // appRequestReq, _ := http.NewRequest("POST", OMSEndpoint+"?api-version=2016-04-01", bytes.NewBuffer(requestJsonBytes)) + appRequestReq, _ := http.NewRequest("POST", "https://dd513101-45ad-4dc0-b6dd-42d88361399e.ods.opinsights.azure.com/collector?api-version=2018-05-01", bytes.NewBuffer(requestJsonBytes)) //appRequestReq.URL.Query().Add("api-version", "2016-04-01") //set headers appRequestReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("User-Agent", userAgent) - // appRequestReq.Header.Set("Log-Type", AppRequestsDataType) + 
appRequestReq.Header.Set("Log-Type", AppRequestsDataType) appRequestReq.Header.Set("ocp-workspace-id", WorkspaceID) appRequestReq.Header.Set("ocp-is-dynamic-data-type", "False") appRequestReq.Header.Set("ocp-intelligence-pack-name", "Azure") - appRequestReq.Header.Set("ocp-json-nesting-resolution", "DataItems") + appRequestReq.Header.Set("ocp-json-nesting-resolution", "records") appRequestReq.Header.Set("time-generated-field", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) appRequestReq.Header.Set("x-ms-OboLocation", "North Europe") From f4686476c6f164bb36a9a80653b2afb78363764c Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Feb 2021 19:29:13 -0800 Subject: [PATCH 096/175] ch --- source/plugins/go/src/oms.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 4c13d876e..c43b17467 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -312,9 +312,9 @@ type InsightsMetricsBlob struct { } type AppMapOsmRequestBlob struct { - DataType string `json:"DataType"` - IPName string `json:"IPName"` - DataItems []appMapOsmRequestMetric `json:"DataItems"` + DataType string `json:"DataType"` + IPName string `json:"IPName"` + records []appMapOsmRequestMetric `json:"DataItems"` } type AppMapOsmDependencyBlob struct { From 974486b0f5f8460f033ea10548bb6c03fe38cf7d Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Sat, 6 Feb 2021 13:43:50 -0800 Subject: [PATCH 097/175] changes --- source/plugins/go/src/oms.go | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index c43b17467..7f266ef44 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -312,15 +312,15 @@ type InsightsMetricsBlob struct { } type AppMapOsmRequestBlob struct { - DataType 
string `json:"DataType"` - IPName string `json:"IPName"` - records []appMapOsmRequestMetric `json:"DataItems"` + DataType string `json:"DataType"` + IPName string `json:"IPName"` + DataItems []appMapOsmRequestMetric `json:"DataItems"` } type AppMapOsmDependencyBlob struct { - DataType string `json:"DataType"` - IPName string `json:"IPName"` - records []appMapOsmDependencyMetric `json:"DataItems"` + DataType string `json:"DataType"` + IPName string `json:"IPName"` + DataItems []appMapOsmDependencyMetric `json:"DataItems"` } // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point @@ -1056,9 +1056,9 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } osmRequestMetrics := AppMapOsmRequestBlob{ - DataType: AppRequestsDataType, - IPName: "LogManagement", - records: requestMetrics} + DataType: AppRequestsDataType, + IPName: "LogManagement", + DataItems: requestMetrics} requestJsonBytes, err := json.Marshal(osmRequestMetrics) //Log("app request json bytes: %v", requestJsonBytes) @@ -1072,15 +1072,16 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int Log("AppMapOSMRequestMetrics-json:%v", osmRequestMetrics) //Post metrics data to LA + appRequestReq, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(requestJsonBytes)) // appRequestReq, _ := http.NewRequest("POST", OMSEndpoint+"?api-version=2016-04-01", bytes.NewBuffer(requestJsonBytes)) - appRequestReq, _ := http.NewRequest("POST", "https://dd513101-45ad-4dc0-b6dd-42d88361399e.ods.opinsights.azure.com/collector?api-version=2018-05-01", bytes.NewBuffer(requestJsonBytes)) + //appRequestReq, _ := http.NewRequest("POST", "https://dd513101-45ad-4dc0-b6dd-42d88361399e.ods.opinsights.azure.com/collector?api-version=2018-05-01", bytes.NewBuffer(requestJsonBytes)) //appRequestReq.URL.Query().Add("api-version", "2016-04-01") //set headers appRequestReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) 
appRequestReq.Header.Set("User-Agent", userAgent) - appRequestReq.Header.Set("Log-Type", AppRequestsDataType) + //appRequestReq.Header.Set("Log-Type", AppRequestsDataType) appRequestReq.Header.Set("ocp-workspace-id", WorkspaceID) appRequestReq.Header.Set("ocp-is-dynamic-data-type", "False") appRequestReq.Header.Set("ocp-intelligence-pack-name", "Azure") @@ -1139,9 +1140,9 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } osmDependencyMetrics := AppMapOsmDependencyBlob{ - DataType: AppDependenciesDataType, - IPName: "LogManagement", - records: dependencyMetrics} + DataType: AppDependenciesDataType, + IPName: "LogManagement", + DataItems: dependencyMetrics} dependencyJsonBytes, err := json.Marshal(osmDependencyMetrics) Log("AppMapOSMDependencyMetrics-json:%v", osmDependencyMetrics) @@ -1155,14 +1156,14 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } //Post metrics data to LA - appDependencyReq, _ := http.NewRequest("POST", OMSEndpoint+"?api-version=2016-04-01", bytes.NewBuffer(dependencyJsonBytes)) + appDependencyReq, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(dependencyJsonBytes)) //req.URL.Query().Add("api-version","2016-04-01") //set headers appDependencyReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) appDependencyReq.Header.Set("User-Agent", userAgent) - appDependencyReq.Header.Set("Log-Type", AppDependenciesDataType) + //appDependencyReq.Header.Set("Log-Type", AppDependenciesDataType) appDependencyReq.Header.Set("ocp-workspace-id", WorkspaceID) appDependencyReq.Header.Set("ocp-is-dynamic-data-type", "False") appDependencyReq.Header.Set("ocp-intelligence-pack-name", "Azure") From 3db82f7a7fa677f088233cbce9d8b41f524ef828 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 10 Feb 2021 13:43:44 -0800 Subject: [PATCH 098/175] undoing appmap changes to keep branch clean --- .../scripts/tomlparser-osm-config.rb | 17 +++--- kubernetes/linux/main.sh | 53 
++++++++++++------- 2 files changed, 43 insertions(+), 27 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 2b8303b94..3b861997d 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -21,8 +21,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"go_goroutines\"]" - +@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_active\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time\", \"envoy_http_downstream_rq_xx\", \"envoy_cluster_downstream_rq\", \"envoy_http_no_route\", \"envoy_server_live\", \"envoy_server_memory_heap_size\", \"envoy_server_memory_physical_size\", \"envoy_server_memory_allocated\", \"envoy_cluster_external_upstream_rq_xx\"]" @scrapeInterval = "1m" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@ -140,13 +139,13 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Set OSM namespaces as environment variable so that prometheus custom config parser can read it and add necessary fielddrops to avoid data duplication # of OSM metrics -promSettingsSharedfile = File.open("prom_config_shared_settings_env_var", "w") -if !promSettingsSharedfile.nil? - promSettingsSharedfile.write("export AZMON_OSM_METRIC_NAMESPACES=#{@osmMetricNamespaces}\n") - # Close file after writing all environment variables - promSettingsSharedfile.close - puts "config::Successfully created prom_config_shared_settings_env_var file for prometheus sidecar" -end +# promSettingsSharedfile = File.open("prom_config_shared_settings_env_var", "w") +# if !promSettingsSharedfile.nil? 
+# promSettingsSharedfile.write("export AZMON_OSM_METRIC_NAMESPACES=#{@osmMetricNamespaces}\n") +# # Close file after writing all environment variables +# promSettingsSharedfile.close +# puts "config::Successfully created prom_config_shared_settings_env_var file for prometheus sidecar" +# end # Write the telemetry to file, so that they can be set as environment variables telemetryFile = File.open("integration_osm_config_env_var", "w") diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index a0808dbc3..341e578d7 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -221,24 +221,24 @@ fi #Parse the OSM configmap to set the right environment variables for metric collection settings #This needs to be done before the prometheus custom config map parsing since we have namespace duplication logic in place. -if [ ! -e "/etc/config/kube.conf" ]; then - if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb - - cat integration_osm_config_env_var | while read line; do - echo $line >> ~/.bashrc - done - source integration_osm_config_env_var - - #Sourcing prometheus side car config settings if it exists - if [ -e "prom_config_shared_settings_env_var" ]; then - cat prom_config_shared_settings_env_var | while read line; do - echo $line >> ~/.bashrc - done - source prom_config_shared_settings_env_var - fi - fi -fi +# if [ ! 
-e "/etc/config/kube.conf" ]; then +# if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then +# /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + +# cat integration_osm_config_env_var | while read line; do +# echo $line >> ~/.bashrc +# done +# source integration_osm_config_env_var + +# #Sourcing prometheus side car config settings if it exists +# # if [ -e "prom_config_shared_settings_env_var" ]; then +# # cat prom_config_shared_settings_env_var | while read line; do +# # echo $line >> ~/.bashrc +# # done +# # source prom_config_shared_settings_env_var +# # fi +# fi +# fi #Parse the prometheus configmap to create a file with new custom settings. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb @@ -288,7 +288,24 @@ cat config_metric_collection_env_var | while read line; do done source config_metric_collection_env_var +if [ ! -e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + + cat integration_osm_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source integration_osm_config_env_var + #Sourcing prometheus side car config settings if it exists + # if [ -e "prom_config_shared_settings_env_var" ]; then + # cat prom_config_shared_settings_env_var | while read line; do + # echo $line >> ~/.bashrc + # done + # source prom_config_shared_settings_env_var + # fi + fi +fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" From 1f3f420186191c9404ce36f88be809f6ad3d820a Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 10 Feb 2021 13:51:49 -0800 Subject: [PATCH 099/175] undoing oms.go changes --- .../scripts/tomlparser-osm-config.rb | 17 +++--- kubernetes/linux/main.sh | 53 ++++++++++++------- 2 files changed, 43 insertions(+), 27 deletions(-) diff --git 
a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 2b8303b94..3b861997d 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -21,8 +21,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"go_goroutines\"]" - +@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_active\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time\", \"envoy_http_downstream_rq_xx\", \"envoy_cluster_downstream_rq\", \"envoy_http_no_route\", \"envoy_server_live\", \"envoy_server_memory_heap_size\", \"envoy_server_memory_physical_size\", \"envoy_server_memory_allocated\", \"envoy_cluster_external_upstream_rq_xx\"]" @scrapeInterval = "1m" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" @@ -140,13 +139,13 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Set OSM namespaces as environment variable so that prometheus custom config parser can read it and add necessary fielddrops to avoid data duplication # of OSM metrics -promSettingsSharedfile = File.open("prom_config_shared_settings_env_var", "w") -if !promSettingsSharedfile.nil? - promSettingsSharedfile.write("export AZMON_OSM_METRIC_NAMESPACES=#{@osmMetricNamespaces}\n") - # Close file after writing all environment variables - promSettingsSharedfile.close - puts "config::Successfully created prom_config_shared_settings_env_var file for prometheus sidecar" -end +# promSettingsSharedfile = File.open("prom_config_shared_settings_env_var", "w") +# if !promSettingsSharedfile.nil? 
+# promSettingsSharedfile.write("export AZMON_OSM_METRIC_NAMESPACES=#{@osmMetricNamespaces}\n") +# # Close file after writing all environment variables +# promSettingsSharedfile.close +# puts "config::Successfully created prom_config_shared_settings_env_var file for prometheus sidecar" +# end # Write the telemetry to file, so that they can be set as environment variables telemetryFile = File.open("integration_osm_config_env_var", "w") diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index a0808dbc3..341e578d7 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -221,24 +221,24 @@ fi #Parse the OSM configmap to set the right environment variables for metric collection settings #This needs to be done before the prometheus custom config map parsing since we have namespace duplication logic in place. -if [ ! -e "/etc/config/kube.conf" ]; then - if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb - - cat integration_osm_config_env_var | while read line; do - echo $line >> ~/.bashrc - done - source integration_osm_config_env_var - - #Sourcing prometheus side car config settings if it exists - if [ -e "prom_config_shared_settings_env_var" ]; then - cat prom_config_shared_settings_env_var | while read line; do - echo $line >> ~/.bashrc - done - source prom_config_shared_settings_env_var - fi - fi -fi +# if [ ! 
-e "/etc/config/kube.conf" ]; then +# if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then +# /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + +# cat integration_osm_config_env_var | while read line; do +# echo $line >> ~/.bashrc +# done +# source integration_osm_config_env_var + +# #Sourcing prometheus side car config settings if it exists +# # if [ -e "prom_config_shared_settings_env_var" ]; then +# # cat prom_config_shared_settings_env_var | while read line; do +# # echo $line >> ~/.bashrc +# # done +# # source prom_config_shared_settings_env_var +# # fi +# fi +# fi #Parse the prometheus configmap to create a file with new custom settings. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb @@ -288,7 +288,24 @@ cat config_metric_collection_env_var | while read line; do done source config_metric_collection_env_var +if [ ! -e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + + cat integration_osm_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source integration_osm_config_env_var + #Sourcing prometheus side car config settings if it exists + # if [ -e "prom_config_shared_settings_env_var" ]; then + # cat prom_config_shared_settings_env_var | while read line; do + # echo $line >> ~/.bashrc + # done + # source prom_config_shared_settings_env_var + # fi + fi +fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" From 02a468da8a7da14a39e8f6bfe7406caeddacc24a Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 10 Feb 2021 15:48:32 -0800 Subject: [PATCH 100/175] reverting changes for appmap --- source/plugins/go/src/oms.go | 758 ++++++++++------------------------- 1 file changed, 214 insertions(+), 544 deletions(-) diff --git a/source/plugins/go/src/oms.go 
b/source/plugins/go/src/oms.go index 7f266ef44..d35acad3d 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -32,19 +32,16 @@ import ( // DataType for Container Log const ContainerLogDataType = "CONTAINER_LOG_BLOB" +//DataType for Container Log v2 +const ContainerLogV2DataType = "CONTAINERINSIGHTS_CONTAINERLOGV2" + // DataType for Insights metric const InsightsMetricsDataType = "INSIGHTS_METRICS_BLOB" -// DataType for ApplicationInsights AppRequests -const AppRequestsDataType = "APPLICATIONINSIGHTS_APPREQUESTS" - -// DataType for ApplicationInsights AppDependencies -const AppDependenciesDataType = "APPLICATIONINSIGHTS_APPDEPENDENCIES" - // DataType for KubeMonAgentEvent const KubeMonAgentEventDataType = "KUBE_MON_AGENT_EVENTS_BLOB" -//env varibale which has ResourceId for LA +//env variable which has ResourceId for LA const ResourceIdEnv = "AKS_RESOURCE_ID" //env variable which has ResourceName for NON-AKS @@ -84,20 +81,26 @@ const DaemonSetContainerLogPluginConfFilePath = "/etc/opt/microsoft/docker-cimpr const ReplicaSetContainerLogPluginConfFilePath = "/etc/opt/microsoft/docker-cimprov/out_oms.conf" const WindowsContainerLogPluginConfFilePath = "/etc/omsagentwindows/out_oms.conf" -// IPName for Container Log -const IPName = "Containers" +// IPName +const IPName = "ContainerInsights" + + const defaultContainerInventoryRefreshInterval = 60 const kubeMonAgentConfigEventFlushInterval = 60 //Eventsource name in mdsd -const MdsdSourceName = "ContainerLogSource" +const MdsdContainerLogSourceName = "ContainerLogSource" +const MdsdContainerLogV2SourceName = "ContainerLogV2Source" -//container logs route - v2 (v2=flush to oneagent, adx= flush to adx ingestion, anything else flush to ODS[default]) +//container logs route (v2=flush to oneagent, adx= flush to adx ingestion, anything else flush to ODS[default]) const ContainerLogsV2Route = "v2" const ContainerLogsADXRoute = "adx" +//container logs schema (v2=ContainerLogsV2 table in LA, anything 
else ContainerLogs table in LA. This is applicable only if Container logs route is NOT ADX) +const ContainerLogV2SchemaVersion = "v2" + var ( // PluginConfiguration the plugins configuration PluginConfiguration map[string]string @@ -131,6 +134,8 @@ var ( ContainerLogsRouteV2 bool // container log route for routing thru ADX ContainerLogsRouteADX bool + // container log schema (applicable only for non-ADX route) + ContainerLogSchemaV2 bool //ADX Cluster URI AdxClusterUri string // ADX clientID @@ -186,8 +191,8 @@ var ( userAgent = "" ) -// DataItem represents the object corresponding to the json that is sent by fluentbit tail plugin -type DataItem struct { +// DataItemLAv1 == ContainerLog table in LA +type DataItemLAv1 struct { LogEntry string `json:"LogEntry"` LogEntrySource string `json:"LogEntrySource"` LogEntryTimeStamp string `json:"LogEntryTimeStamp"` @@ -199,17 +204,32 @@ type DataItem struct { Computer string `json:"Computer"` } +// DataItemLAv2 == ContainerLogV2 table in LA +// Please keep the names same as destination column names, to avoid transforming one to another in the pipeline +type DataItemLAv2 struct { + TimeGenerated string `json:"TimeGenerated"` + Computer string `json:"Computer"` + ContainerId string `json:"ContainerId"` + ContainerName string `json:"ContainerName"` + PodName string `json:"PodName"` + PodNamespace string `json:"PodNamespace"` + LogMessage string `json:"LogMessage"` + LogSource string `json:"LogSource"` + //PodLabels string `json:"PodLabels"` +} + +// DataItemADX == ContainerLogV2 table in ADX type DataItemADX struct { - TimeGenerated string `json:"TimeGenerated"` - Computer string `json:"Computer"` - ContainerID string `json:"ContainerID"` - ContainerName string `json:"ContainerName"` - PodName string `json:"PodName"` - PodNamespace string `json:"PodNamespace"` - LogMessage string `json:"LogMessage"` - LogSource string `json:"LogSource"` + TimeGenerated string `json:"TimeGenerated"` + Computer string `json:"Computer"` + 
ContainerId string `json:"ContainerId"` + ContainerName string `json:"ContainerName"` + PodName string `json:"PodName"` + PodNamespace string `json:"PodNamespace"` + LogMessage string `json:"LogMessage"` + LogSource string `json:"LogSource"` //PodLabels string `json:"PodLabels"` - AzureResourceId string `json:"AzureResourceId"` + AzureResourceId string `json:"AzureResourceId"` } // telegraf metric DataItem represents the object corresponding to the json that is sent by fluentbit tail plugin @@ -225,85 +245,6 @@ type laTelegrafMetric struct { Computer string `json:"Computer"` } -type appMapOsmRequestMetric struct { - time string `json:"time"` - Id string `json:"Id"` - Source string `json:"Source"` - Name string `json:"Name"` - Url string `json:"Url"` - Success bool `json:"Success"` - ResultCode string `json:"ResultCode"` - DurationMs float64 `json:"DurationMs"` - PerformanceBucket string `json:"PerformanceBucket"` - Properties string `json:"Properties"` - Measurements string `json:"Measurements"` - OperationName string `json:"OperationName"` - OperationId string `json:"OperationId"` - ParentId string `json:"ParentId"` - SyntheticSource string `json:"SyntheticSource"` - SessionId string `json:"SessionId"` - UserId string `json:"UserId"` - UserAuthenticatedId string `json:"UserAuthenticatedId"` - UserAccountId string `json:"UserAccountId"` - AppVersion string `json:"AppVersion"` - AppRoleName string `json:"AppRoleName"` - AppRoleInstance string `json:"AppRoleInstance"` - ClientType string `json:"ClientType"` - ClientModel string `json:"ClientModel"` - ClientOS string `json:"ClientOS"` - ClientIP string `json:"ClientIP"` - ClientCity string `json:"ClientCity"` - ClientStateOrProvince string `json:"ClientStateOrProvince"` - ClientCountryOrRegion string `json:"ClientCountryOrRegion"` - ClientBrowser string `json:"ClientBrowser"` - ResourceGUID string `json:"ResourceGUID"` - IKey string `json:"IKey"` - SDKVersion string `json:"SDKVersion"` - ItemCount int64 
`json:"ItemCount"` - ReferencedItemId string `json:"ReferencedItemId"` - ReferencedType string `json:"ReferencedType"` -} - -type appMapOsmDependencyMetric struct { - time string `json:"time"` - Id string `json:"Id"` - Target string `json:"Target"` - DependencyType string `json:"DependencyType"` - Name string `json:"Name"` - Data string `json:"Data"` - Success bool `json:"Success"` - ResultCode string `json:"ResultCode"` - DurationMs float64 `json:"DurationMs"` - PerformanceBucket string `json:"PerformanceBucket"` - Properties string `json:"Properties"` - Measurements string `json:"Measurements"` - OperationName string `json:"OperationName"` - OperationId string `json:"OperationId"` - ParentId string `json:"ParentId"` - SyntheticSource string `json:"SyntheticSource"` - SessionId string `json:"SessionId"` - UserId string `json:"UserId"` - UserAuthenticatedId string `json:"UserAuthenticatedId"` - UserAccountId string `json:"UserAccountId"` - AppVersion string `json:"AppVersion"` - AppRoleName string `json:"AppRoleName"` - AppRoleInstance string `json:"AppRoleInstance"` - ClientType string `json:"ClientType"` - ClientModel string `json:"ClientModel"` - ClientOS string `json:"ClientOS"` - ClientIP string `json:"ClientIP"` - ClientCity string `json:"ClientCity"` - ClientStateOrProvince string `json:"ClientStateOrProvince"` - ClientCountryOrRegion string `json:"ClientCountryOrRegion"` - ClientBrowser string `json:"ClientBrowser"` - ResourceGUID string `json:"ResourceGUID"` - IKey string `json:"IKey"` - SDKVersion string `json:"SDKVersion"` - ItemCount int64 `json:"ItemCount"` - ReferencedItemId string `json:"ReferencedItemId"` - ReferencedType string `json:"ReferencedType"` -} - // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point type InsightsMetricsBlob struct { DataType string `json:"DataType"` @@ -311,23 +252,18 @@ type InsightsMetricsBlob struct { DataItems []laTelegrafMetric `json:"DataItems"` } -type 
AppMapOsmRequestBlob struct { - DataType string `json:"DataType"` - IPName string `json:"IPName"` - DataItems []appMapOsmRequestMetric `json:"DataItems"` -} - -type AppMapOsmDependencyBlob struct { - DataType string `json:"DataType"` - IPName string `json:"IPName"` - DataItems []appMapOsmDependencyMetric `json:"DataItems"` +// ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point +type ContainerLogBlobLAv1 struct { + DataType string `json:"DataType"` + IPName string `json:"IPName"` + DataItems []DataItemLAv1 `json:"DataItems"` } // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point -type ContainerLogBlob struct { +type ContainerLogBlobLAv2 struct { DataType string `json:"DataType"` IPName string `json:"IPName"` - DataItems []DataItem `json:"DataItems"` + DataItems []DataItemLAv2 `json:"DataItems"` } // MsgPackEntry represents the object corresponding to a single messagepack event in the messagepack stream @@ -423,20 +359,6 @@ func createLogger() *log.Logger { return logger } -// newUUID generates a random UUID according to RFC 4122 -// func newUUID() (string, error) { -// uuid := make([]byte, 16) -// n, err := io.ReadFull(rand.Reader, uuid) -// if n != len(uuid) || err != nil { -// return "", err -// } -// // variant bits; see section 4.1.1 -// uuid[8] = uuid[8]&^0xc0 | 0x80 -// // version 4 (pseudo-random); see section 4.1.3 -// uuid[6] = uuid[6]&^0xf0 | 0x40 -// return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil -// } - func updateContainerImageNameMaps() { for ; true; <-ContainerImageNameRefreshTicker.C { Log("Updating ImageIDMap and NameIDMap") @@ -748,18 +670,12 @@ func flushKubeMonAgentEventRecords() { } //Translates telegraf time series to one or more Azure loganalytics metric(s) -func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetric, []*appMapOsmRequestMetric, 
[]*appMapOsmDependencyMetric, error) { +func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetric, error) { + var laMetrics []*laTelegrafMetric - var appMapOsmRequestMetrics []*appMapOsmRequestMetric - var appMapOsmDependencyMetrics []*appMapOsmDependencyMetric var tags map[interface{}]interface{} - // string appName - // string destinationAppName - // string id - // string operationId tags = m["tags"].(map[interface{}]interface{}) tagMap := make(map[string]string) - metricNamespace := fmt.Sprintf("%s", m["name"]) for k, v := range tags { key := fmt.Sprintf("%s", k) if key == "" { @@ -778,7 +694,7 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri tagJson, err := json.Marshal(&tagMap) if err != nil { - return nil, nil, nil, err + return nil, err } for k, v := range fieldMap { @@ -800,145 +716,13 @@ func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetri //Log ("la metric:%v", laMetric) laMetrics = append(laMetrics, &laMetric) - - // OSM metric population for AppMap - metricName := fmt.Sprintf("%s", k) - propertyMap := make(map[string]string) - propertyMap[fmt.Sprintf("DeploymentId")] = "523a92fea186461581efca83b7b66a0d" - propertyMap[fmt.Sprintf("Stamp")] = "Breeze-INT-SCUS" - propertiesJson, err := json.Marshal(&propertyMap) - - if err != nil { - return nil, nil, nil, err - } - - measurementsMap := make(map[string]string) - measurementsMap[fmt.Sprintf("AvailableMemory")] = "423" - measurementsJson, err := json.Marshal(&measurementsMap) - - if err != nil { - return nil, nil, nil, err - } - - if (metricName == "envoy_cluster_upstream_rq_active") && (strings.HasPrefix(metricNamespace, "container.azm.ms.osm")) { - if fv > 0 { - appName := tagMap["app"] - destinationAppName := tagMap["envoy_cluster_name"] - itemCount := int64(1) - success := true - // durationMs := float64(1.0) - operationId := uuid.New().String() - // if err != nil { - // Log("translateTelegrafMetrics::error while 
generating operationId GUID: %v\n", err) - // } - // Log("translateTelegrafMetrics::%s\n", operationId) - - id := uuid.New().String() - // if err != nil { - // Log("translateTelegrafMetrics::error while generating id GUID: %v\n", err) - // } - Log("translateTelegrafMetrics::%s\n", id) - collectionTimeValue := m["timestamp"].(uint64) - osmRequestMetric := appMapOsmRequestMetric{ - // Absolutely needed metrics for topology generation for AppMap - time: time.Unix(int64(collectionTimeValue), 0).Format(time.RFC3339), - OperationId: fmt.Sprintf("%s", operationId), - ParentId: fmt.Sprintf("%s", id), - AppRoleName: fmt.Sprintf("%s", destinationAppName), - DurationMs: 898.42, - Success: success, - ItemCount: 42, - //metrics to get ingestion working - Id: fmt.Sprintf("%s", "8be927b9-0bde-4357-87ee-73c13b6f6a05"), - Source: fmt.Sprintf("%s", "Application"), - Name: fmt.Sprintf("%s", "TestData-Request-DataGen"), - Url: fmt.Sprintf("%s", "https://portal.azure.com"), - ResultCode: fmt.Sprintf("%s", "200"), - PerformanceBucket: fmt.Sprintf("%s", "500ms-1sec"), - Properties: fmt.Sprintf("%s", propertiesJson), - Measurements: fmt.Sprintf("%s", measurementsJson), - OperationName: fmt.Sprintf("%s", "POST /v2/passthrough"), - SyntheticSource: fmt.Sprintf("%s", "Windows"), - SessionId: fmt.Sprintf("%s", "e357297720214cdc818565f89cfad359"), - UserId: fmt.Sprintf("%s", "5bfb5187ff9742fbaec5b19dd7217f40"), - UserAuthenticatedId: fmt.Sprintf("%s", "somebody@microsoft.com"), - UserAccountId: fmt.Sprintf("%s", "e357297720214cdc818565f89cfad359"), - AppVersion: fmt.Sprintf("%s", "4.2-alpha"), - AppRoleInstance: fmt.Sprintf("%s", "Breeze_IN_42"), - ClientType: fmt.Sprintf("%s", "PC"), - ClientModel: fmt.Sprintf("%s", "Other"), - ClientOS: fmt.Sprintf("%s", "Windows 7"), - ClientIP: fmt.Sprintf("%s", "0.0.0.0"), - ClientCity: fmt.Sprintf("%s", "Sydney"), - ClientStateOrProvince: fmt.Sprintf("%s", "New South Wales"), - ClientCountryOrRegion: fmt.Sprintf("%s", "Australia"), - ClientBrowser: 
fmt.Sprintf("%s", "Internet Explorer 9.0"), - ResourceGUID: fmt.Sprintf("%s", "d4e6868c-02e8-41d2-a09d-bbb5ae35af5c"), - IKey: fmt.Sprintf("%s", "0539013c-a321-46fd-b831-1cc16729b449"), - SDKVersion: fmt.Sprintf("%s", "dotnet:2.2.0-54037"), - ReferencedItemId: fmt.Sprintf("%s", "905812ce-48c3-44ee-ab93-33e8768f59f9"), - ReferencedType: fmt.Sprintf("%s", "IoTRequests"), - // Computer: Computer, //this is the collection agent's computer name, not necessarily to which computer the metric applies to - } - - Log("osm request metric:%v", osmRequestMetric) - appMapOsmRequestMetrics = append(appMapOsmRequestMetrics, &osmRequestMetric) - - osmDependencyMetric := appMapOsmDependencyMetric{ - // Absolutely needed metrics for topology generation for AppMap - time: time.Unix(int64(collectionTimeValue), 0).Format(time.RFC3339), - Id: fmt.Sprintf("%s", id), - Target: fmt.Sprintf("%s", destinationAppName), - Success: success, - DurationMs: 898.42, - OperationId: fmt.Sprintf("%s", operationId), - AppRoleName: fmt.Sprintf("%s", appName), - ItemCount: itemCount, - //metrics to get ingestion working - DependencyType: fmt.Sprintf("%s", "Ajax"), - Name: fmt.Sprintf("%s", "TestData-Request-DataGen"), - Data: fmt.Sprintf("%s", "GET https://n9440-fpj.gmbeelopm.com/HhjmlogpEhiLLL/ECO//GhoppnaBeAelhaekm/3944-40-42J92:22:19.750D/MehgKepmpnlegoDboghnMaedd"), - ResultCode: fmt.Sprintf("%s", "200"), - PerformanceBucket: fmt.Sprintf("%s", "500ms-1sec"), - Properties: fmt.Sprintf("%s", propertiesJson), - Measurements: fmt.Sprintf("%s", measurementsJson), - OperationName: fmt.Sprintf("%s", "POST /v2/passthrough"), - ParentId: fmt.Sprintf("%s", "b1bb1e27-4204-096e-9e89-1f1dfac718fc"), - SyntheticSource: fmt.Sprintf("%s", "Windows"), - SessionId: fmt.Sprintf("%s", "e357297720214cdc818565f89cfad359"), - UserId: fmt.Sprintf("%s", "5bfb5187ff9742fbaec5b19dd7217f40"), - UserAuthenticatedId: fmt.Sprintf("%s", "somebody@microsoft.com"), - UserAccountId: fmt.Sprintf("%s", 
"e357297720214cdc818565f89cfad359"), - AppVersion: fmt.Sprintf("%s", "4.2-alpha"), - AppRoleInstance: fmt.Sprintf("%s", "Breeze_IN_42"), - ClientType: fmt.Sprintf("%s", "PC"), - ClientModel: fmt.Sprintf("%s", "Other"), - ClientOS: fmt.Sprintf("%s", "Windows 7"), - ClientIP: fmt.Sprintf("%s", "0.0.0.0"), - ClientCity: fmt.Sprintf("%s", "Sydney"), - ClientStateOrProvince: fmt.Sprintf("%s", "New South Wales"), - ClientCountryOrRegion: fmt.Sprintf("%s", "Australia"), - ClientBrowser: fmt.Sprintf("%s", "Internet Explorer 9.0"), - ResourceGUID: fmt.Sprintf("%s", "d4e6868c-02e8-41d2-a09d-bbb5ae35af5c"), - IKey: fmt.Sprintf("%s", "0539013c-a321-46fd-b831-1cc16729b449"), - SDKVersion: fmt.Sprintf("%s", "dotnet:2.2.0-54037"), - ReferencedItemId: fmt.Sprintf("%s", "905812ce-48c3-44ee-ab93-33e8768f59f9"), - ReferencedType: fmt.Sprintf("%s", "IoTRequests"), - } - - Log("osm dependency metric:%v", osmDependencyMetric) - appMapOsmDependencyMetrics = append(appMapOsmDependencyMetrics, &osmDependencyMetric) - } - } } - return laMetrics, appMapOsmRequestMetrics, appMapOsmDependencyMetrics, nil + return laMetrics, nil } // send metrics from Telegraf to LA. 
1) Translate telegraf timeseries to LA metric(s) 2) Send it to LA as 'InsightsMetrics' fixed type func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int { var laMetrics []*laTelegrafMetric - var appMapOsmRequestMetrics []*appMapOsmRequestMetric - var appMapOsmDependencyMetrics []*appMapOsmDependencyMetric if (telegrafRecords == nil) || !(len(telegrafRecords) > 0) { Log("PostTelegrafMetricsToLA::Error:no timeseries to derive") @@ -946,15 +730,13 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } for _, record := range telegrafRecords { - translatedMetrics, osmRequestMetrics, osmDependencyMetrics, err := translateTelegrafMetrics(record) + translatedMetrics, err := translateTelegrafMetrics(record) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when translating telegraf metric to log analytics metric %q", err) Log(message) //SendException(message) //This will be too noisy } laMetrics = append(laMetrics, translatedMetrics...) - appMapOsmRequestMetrics = append(appMapOsmRequestMetrics, osmRequestMetrics...) - appMapOsmDependencyMetrics = append(appMapOsmDependencyMetrics, osmDependencyMetrics...) 
} if (laMetrics == nil) || !(len(laMetrics) > 0) { @@ -965,22 +747,6 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int Log(message) } - if (appMapOsmRequestMetrics == nil) || !(len(appMapOsmRequestMetrics) > 0) { - Log("PostTelegrafMetricsToLA::Info:no OSM request metrics derived from timeseries data") - return output.FLB_OK - } else { - message := fmt.Sprintf("PostTelegrafMetricsToLA::Info:derived osm request %v metrics from %v timeseries", len(appMapOsmRequestMetrics), len(telegrafRecords)) - Log(message) - } - - if (appMapOsmDependencyMetrics == nil) || !(len(appMapOsmDependencyMetrics) > 0) { - Log("PostTelegrafMetricsToLA::Info:no OSM dependency metrics derived from timeseries data") - return output.FLB_OK - } else { - message := fmt.Sprintf("PostTelegrafMetricsToLA::Info:derived osm dependency %v metrics from %v timeseries", len(appMapOsmDependencyMetrics), len(telegrafRecords)) - Log(message) - } - var metrics []laTelegrafMetric var i int @@ -994,7 +760,6 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int DataItems: metrics} jsonBytes, err := json.Marshal(laTelegrafMetrics) - //Log("laTelegrafMetrics-json:%v", laTelegrafMetrics) if err != nil { message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling json %q", err) @@ -1005,7 +770,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int //Post metrics data to LA req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(jsonBytes)) - //Log("LA request json bytes: %v", jsonBytes) + //req.URL.Query().Add("api-version","2016-04-01") //set headers @@ -1044,171 +809,8 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int numMetrics := len(laMetrics) UpdateNumTelegrafMetricsSentTelemetry(numMetrics, 0, 0) - Log("PostTelegrafMetricsToLA::Info:LArequests:Http Request: %v", req) Log("PostTelegrafMetricsToLA::Info:Successfully flushed %v records in %v", numMetrics, elapsed) - // 
AppMap Requests - var requestMetrics []appMapOsmRequestMetric - var j int - - for j = 0; j < len(appMapOsmRequestMetrics); j++ { - requestMetrics = append(requestMetrics, *appMapOsmRequestMetrics[j]) - } - - osmRequestMetrics := AppMapOsmRequestBlob{ - DataType: AppRequestsDataType, - IPName: "LogManagement", - DataItems: requestMetrics} - - requestJsonBytes, err := json.Marshal(osmRequestMetrics) - //Log("app request json bytes: %v", requestJsonBytes) - - if err != nil { - message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling app requests json %q", err) - Log(message) - SendException(message) - return output.FLB_OK - } - Log("AppMapOSMRequestMetrics-json:%v", osmRequestMetrics) - - //Post metrics data to LA - appRequestReq, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(requestJsonBytes)) - // appRequestReq, _ := http.NewRequest("POST", OMSEndpoint+"?api-version=2016-04-01", bytes.NewBuffer(requestJsonBytes)) - //appRequestReq, _ := http.NewRequest("POST", "https://dd513101-45ad-4dc0-b6dd-42d88361399e.ods.opinsights.azure.com/collector?api-version=2018-05-01", bytes.NewBuffer(requestJsonBytes)) - - //appRequestReq.URL.Query().Add("api-version", "2016-04-01") - - //set headers - appRequestReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) - appRequestReq.Header.Set("User-Agent", userAgent) - //appRequestReq.Header.Set("Log-Type", AppRequestsDataType) - appRequestReq.Header.Set("ocp-workspace-id", WorkspaceID) - appRequestReq.Header.Set("ocp-is-dynamic-data-type", "False") - appRequestReq.Header.Set("ocp-intelligence-pack-name", "Azure") - appRequestReq.Header.Set("ocp-json-nesting-resolution", "records") - appRequestReq.Header.Set("time-generated-field", time.Now().Format(time.RFC3339)) - appRequestReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) - appRequestReq.Header.Set("x-ms-OboLocation", "North Europe") - appRequestReq.Header.Set("x-ms-ServiceIdentity", "ApplicationInsights") - 
appRequestReq.Header.Set("Content-Type", "application/json") - // appRequestReq.Header.Set("Content-Encoding", "gzip") - - // appRequestReq.Header.Set("x-ms-ResourceLocation", "records") - - appRequestReqID := uuid.New().String() - appRequestReq.Header.Set("X-Request-ID", appRequestReqID) - - //expensive to do string len for every request, so use a flag - if ResourceCentric == true { - appRequestReq.Header.Set("x-ms-AzureResourceId", ResourceID) - } - - reqStart := time.Now() - appRequestResp, err := HTTPClient.Do(appRequestReq) - reqElapsed := time.Since(reqStart) - - if err != nil { - message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending apprequest %v metrics. duration:%v err:%q \n", len(appMapOsmRequestMetrics), reqElapsed, err.Error()) - Log(message) - UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) - return output.FLB_RETRY - } - - if appRequestResp == nil || appRequestResp.StatusCode != 200 { - if appRequestResp != nil { - Log("PostTelegrafMetricsToLA::Error:(retriable) app requests RequestID %s Response Status %v Status Code %v", appRequestReqID, appRequestResp.Status, appRequestResp.StatusCode) - } - if appRequestResp != nil && appRequestResp.StatusCode == 429 { - UpdateNumTelegrafMetricsSentTelemetry(0, 1, 1) - } - return output.FLB_RETRY - } - - defer appRequestResp.Body.Close() - - appRequestNumMetrics := len(appMapOsmRequestMetrics) - UpdateNumTelegrafMetricsSentTelemetry(appRequestNumMetrics, 0, 0) - Log("PostTelegrafMetricsToLA::Info:AppRequests:Http Request: %v", appRequestReq) - Log("PostTelegrafMetricsToLA::Info:AppRequests:Successfully flushed %v records in %v with status code %v", appRequestNumMetrics, reqElapsed, appRequestResp.StatusCode) - - // AppMap Dependencies - var dependencyMetrics []appMapOsmDependencyMetric - var myint int - - for myint = 0; myint < len(appMapOsmDependencyMetrics); myint++ { - dependencyMetrics = append(dependencyMetrics, *appMapOsmDependencyMetrics[myint]) - } - - osmDependencyMetrics := 
AppMapOsmDependencyBlob{ - DataType: AppDependenciesDataType, - IPName: "LogManagement", - DataItems: dependencyMetrics} - - dependencyJsonBytes, err := json.Marshal(osmDependencyMetrics) - Log("AppMapOSMDependencyMetrics-json:%v", osmDependencyMetrics) - //Log("app dependency json bytes: %v", dependencyJsonBytes) - - if err != nil { - message := fmt.Sprintf("PostTelegrafMetricsToLA::Error:when marshalling app dependencies json %q", err) - Log(message) - SendException(message) - return output.FLB_OK - } - - //Post metrics data to LA - appDependencyReq, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(dependencyJsonBytes)) - - //req.URL.Query().Add("api-version","2016-04-01") - - //set headers - appDependencyReq.Header.Set("x-ms-date", time.Now().Format(time.RFC3339)) - appDependencyReq.Header.Set("User-Agent", userAgent) - //appDependencyReq.Header.Set("Log-Type", AppDependenciesDataType) - appDependencyReq.Header.Set("ocp-workspace-id", WorkspaceID) - appDependencyReq.Header.Set("ocp-is-dynamic-data-type", "False") - appDependencyReq.Header.Set("ocp-intelligence-pack-name", "Azure") - appDependencyReq.Header.Set("ocp-json-nesting-resolution", "records") - appDependencyReq.Header.Set("time-generated-field", time.Now().Format(time.RFC3339)) - appDependencyReq.Header.Set("data-available-time", time.Now().Format(time.RFC3339)) - appDependencyReq.Header.Set("x-ms-OboLocation", "North Europe") - appDependencyReq.Header.Set("x-ms-ServiceIdentity", "ApplicationInsights") - appDependencyReq.Header.Set("Content-Type", "application/json") - appDependencyReqID := uuid.New().String() - appDependencyReq.Header.Set("X-Request-ID", appDependencyReqID) - - //expensive to do string len for every request, so use a flag - if ResourceCentric == true { - appDependencyReq.Header.Set("x-ms-AzureResourceId", ResourceID) - } - - depStart := time.Now() - appDependencyResp, err := HTTPClient.Do(appDependencyReq) - depElapsed := time.Since(depStart) - - if err != nil { - message := 
fmt.Sprintf("PostTelegrafMetricsToLA::Error:(retriable) when sending appdependency %v metrics. duration:%v err:%q \n", len(appMapOsmDependencyMetrics), elapsed, err.Error()) - Log(message) - UpdateNumTelegrafMetricsSentTelemetry(0, 1, 0) - return output.FLB_RETRY - } - - if appDependencyResp == nil || appDependencyResp.StatusCode != 200 { - if appDependencyResp != nil { - Log("PostTelegrafMetricsToLA::Error:(retriable) app dependency RequestID %s Response Status %v Status Code %v", appDependencyReqID, appDependencyResp.Status, appDependencyResp.StatusCode) - } - if appDependencyResp != nil && appDependencyResp.StatusCode == 429 { - UpdateNumTelegrafMetricsSentTelemetry(0, 1, 1) - } - return output.FLB_RETRY - } - - defer appDependencyResp.Body.Close() - - appDependencyNumMetrics := len(appMapOsmDependencyMetrics) - UpdateNumTelegrafMetricsSentTelemetry(appDependencyNumMetrics, 0, 0) - Log("PostTelegrafMetricsToLA::Info:AppDependency:Http Request: %v", appDependencyReq) - Log("PostTelegrafMetricsToLA::Info:AppDependency:Successfully flushed %v records in %v with status code - %v", appDependencyNumMetrics, depElapsed, appDependencyResp.StatusCode) - return output.FLB_OK } @@ -1223,7 +825,8 @@ func UpdateNumTelegrafMetricsSentTelemetry(numMetricsSent int, numSendErrors int // PostDataHelper sends data to the ODS endpoint or oneagent or ADX func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { start := time.Now() - var dataItems []DataItem + var dataItemsLAv1 []DataItemLAv1 + var dataItemsLAv2 []DataItemLAv2 var dataItemsADX []DataItemADX var msgPackEntries []MsgPackEntry @@ -1261,26 +864,42 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } stringMap = make(map[string]string) + //below id & name are used by latency telemetry in both v1 & v2 LA schemas + id := "" + name := "" logEntry := ToString(record["log"]) logEntryTimeStamp := ToString(record["time"]) - stringMap["LogEntry"] = logEntry - stringMap["LogEntrySource"] 
= logEntrySource - stringMap["LogEntryTimeStamp"] = logEntryTimeStamp - stringMap["SourceSystem"] = "Containers" - stringMap["Id"] = containerID - - if val, ok := imageIDMap[containerID]; ok { - stringMap["Image"] = val - } + //ADX Schema & LAv2 schema are almost the same (except resourceId) + if (ContainerLogSchemaV2 == true || ContainerLogsRouteADX == true) { + stringMap["Computer"] = Computer + stringMap["ContainerId"] = containerID + stringMap["ContainerName"] = containerName + stringMap["PodName"] = k8sPodName + stringMap["PodNamespace"] = k8sNamespace + stringMap["LogMessage"] = logEntry + stringMap["LogSource"] = logEntrySource + stringMap["TimeGenerated"] = logEntryTimeStamp + } else { + stringMap["LogEntry"] = logEntry + stringMap["LogEntrySource"] = logEntrySource + stringMap["LogEntryTimeStamp"] = logEntryTimeStamp + stringMap["SourceSystem"] = "Containers" + stringMap["Id"] = containerID + + if val, ok := imageIDMap[containerID]; ok { + stringMap["Image"] = val + } - if val, ok := nameIDMap[containerID]; ok { - stringMap["Name"] = val - } + if val, ok := nameIDMap[containerID]; ok { + stringMap["Name"] = val + } - stringMap["TimeOfCommand"] = start.Format(time.RFC3339) - stringMap["Computer"] = Computer - var dataItem DataItem + stringMap["TimeOfCommand"] = start.Format(time.RFC3339) + stringMap["Computer"] = Computer + } + var dataItemLAv1 DataItemLAv1 + var dataItemLAv2 DataItemLAv2 var dataItemADX DataItemADX var msgPackEntry MsgPackEntry @@ -1297,50 +916,68 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } else if ContainerLogsRouteADX == true { if ResourceCentric == true { stringMap["AzureResourceId"] = ResourceID + } else { + stringMap["AzureResourceId"] = "" } - stringMap["PodName"] = k8sPodName - stringMap["PodNamespace"] = k8sNamespace - stringMap["ContainerName"] = containerName dataItemADX = DataItemADX{ - TimeGenerated: stringMap["LogEntryTimeStamp"], - Computer: stringMap["Computer"], - ContainerID: 
stringMap["Id"], - ContainerName: stringMap["ContainerName"], - PodName: stringMap["PodName"], - PodNamespace: stringMap["PodNamespace"], - LogMessage: stringMap["LogEntry"], - LogSource: stringMap["LogEntrySource"], - AzureResourceId: stringMap["AzureResourceId"], + TimeGenerated: stringMap["TimeGenerated"], + Computer: stringMap["Computer"], + ContainerId: stringMap["ContainerId"], + ContainerName: stringMap["ContainerName"], + PodName: stringMap["PodName"], + PodNamespace: stringMap["PodNamespace"], + LogMessage: stringMap["LogMessage"], + LogSource: stringMap["LogSource"], + AzureResourceId: stringMap["AzureResourceId"], } //ADX dataItemsADX = append(dataItemsADX, dataItemADX) } else { - dataItem = DataItem{ - ID: stringMap["Id"], - LogEntry: stringMap["LogEntry"], - LogEntrySource: stringMap["LogEntrySource"], - LogEntryTimeStamp: stringMap["LogEntryTimeStamp"], - LogEntryTimeOfCommand: stringMap["TimeOfCommand"], - SourceSystem: stringMap["SourceSystem"], - Computer: stringMap["Computer"], - Image: stringMap["Image"], - Name: stringMap["Name"], + if (ContainerLogSchemaV2 == true) { + dataItemLAv2 = DataItemLAv2{ + TimeGenerated: stringMap["TimeGenerated"], + Computer: stringMap["Computer"], + ContainerId: stringMap["ContainerId"], + ContainerName: stringMap["ContainerName"], + PodName: stringMap["PodName"], + PodNamespace: stringMap["PodNamespace"], + LogMessage: stringMap["LogMessage"], + LogSource: stringMap["LogSource"], + } + //ODS-v2 schema + dataItemsLAv2 = append(dataItemsLAv2, dataItemLAv2) + name = stringMap["ContainerName"] + id = stringMap["ContainerId"] + } else { + dataItemLAv1 = DataItemLAv1{ + ID: stringMap["Id"], + LogEntry: stringMap["LogEntry"], + LogEntrySource: stringMap["LogEntrySource"], + LogEntryTimeStamp: stringMap["LogEntryTimeStamp"], + LogEntryTimeOfCommand: stringMap["TimeOfCommand"], + SourceSystem: stringMap["SourceSystem"], + Computer: stringMap["Computer"], + Image: stringMap["Image"], + Name: stringMap["Name"], + } + //ODS-v1 
schema + dataItemsLAv1 = append(dataItemsLAv1, dataItemLAv1) + name = stringMap["Name"] + id = stringMap["Id"] } - //ODS - dataItems = append(dataItems, dataItem) } - if stringMap["LogEntryTimeStamp"] != "" { - loggedTime, e := time.Parse(time.RFC3339, stringMap["LogEntryTimeStamp"]) + if logEntryTimeStamp != "" { + loggedTime, e := time.Parse(time.RFC3339, logEntryTimeStamp) if e != nil { - message := fmt.Sprintf("Error while converting LogEntryTimeStamp for telemetry purposes: %s", e.Error()) + message := fmt.Sprintf("Error while converting logEntryTimeStamp for telemetry purposes: %s", e.Error()) Log(message) SendException(message) } else { ltncy := float64(start.Sub(loggedTime) / time.Millisecond) if ltncy >= maxLatency { maxLatency = ltncy - maxLatencyContainer = dataItem.Name + "=" + dataItem.ID + maxLatencyContainer = name + "=" + id } } } @@ -1350,8 +987,12 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { if len(msgPackEntries) > 0 && ContainerLogsRouteV2 == true { //flush to mdsd + mdsdSourceName := MdsdContainerLogSourceName + if (ContainerLogSchemaV2 == true) { + mdsdSourceName = MdsdContainerLogV2SourceName + } fluentForward := MsgPackForward{ - Tag: MdsdSourceName, + Tag: mdsdSourceName, Entries: msgPackEntries, } @@ -1398,7 +1039,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { elapsed = time.Since(start) if er != nil { - Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... error : %s", len(dataItems), elapsed, er.Error()) + Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... error : %s", len(msgPackEntries), elapsed, er.Error()) if MdsdMsgpUnixSocketClient != nil { MdsdMsgpUnixSocketClient.Close() MdsdMsgpUnixSocketClient = nil @@ -1444,14 +1085,14 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } } - // Setup a maximum time for completion to be 15 Seconds. 
+ // Setup a maximum time for completion to be 30 Seconds. ctx, cancel := context.WithTimeout(ParentContext, 30*time.Second) defer cancel() //ADXFlushMutex.Lock() //defer ADXFlushMutex.Unlock() //MultiJSON support is not there yet - if ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogv2Mapping", ingest.JSON), ingest.FileFormat(ingest.JSON)); ingestionErr != nil { + if ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogV2Mapping", ingest.JSON), ingest.FileFormat(ingest.JSON)); ingestionErr != nil { Log("Error when streaming to ADX Ingestion: %s", ingestionErr.Error()) //ADXIngestor = nil //not required as per ADX team. Will keep it to indicate that we tried this approach @@ -1466,58 +1107,75 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords = len(dataItemsADX) Log("Success::ADX::Successfully wrote %d container log records to ADX in %s", numContainerLogRecords, elapsed) - } else { - //flush to ODS - if len(dataItems) > 0 { - logEntry := ContainerLogBlob{ - DataType: ContainerLogDataType, + } else { //ODS + var logEntry interface{} + recordType := "" + loglinesCount := 0 + //schema v2 + if (len(dataItemsLAv2) > 0 && ContainerLogSchemaV2 == true) { + logEntry = ContainerLogBlobLAv2{ + DataType: ContainerLogV2DataType, IPName: IPName, - DataItems: dataItems} - - marshalled, err := json.Marshal(logEntry) - if err != nil { - message := fmt.Sprintf("Error while Marshalling log Entry: %s", err.Error()) - Log(message) - SendException(message) - return output.FLB_OK + DataItems: dataItemsLAv2} + loglinesCount = len(dataItemsLAv2) + recordType = "ContainerLogV2" + } else { + //schema v1 + if len(dataItemsLAv1) > 0 { + logEntry = ContainerLogBlobLAv1{ + DataType: ContainerLogDataType, + IPName: IPName, + DataItems: dataItemsLAv1} + loglinesCount = len(dataItemsLAv1) + recordType = "ContainerLog" } + } - req, _ := http.NewRequest("POST", OMSEndpoint, 
bytes.NewBuffer(marshalled)) - req.Header.Set("Content-Type", "application/json") - req.Header.Set("User-Agent", userAgent) - reqId := uuid.New().String() - req.Header.Set("X-Request-ID", reqId) - //expensive to do string len for every request, so use a flag - if ResourceCentric == true { - req.Header.Set("x-ms-AzureResourceId", ResourceID) - } + marshalled, err := json.Marshal(logEntry) + //Log("LogEntry::e %s", marshalled) + if err != nil { + message := fmt.Sprintf("Error while Marshalling log Entry: %s", err.Error()) + Log(message) + SendException(message) + return output.FLB_OK + } - resp, err := HTTPClient.Do(req) - elapsed = time.Since(start) + req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(marshalled)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", userAgent) + reqId := uuid.New().String() + req.Header.Set("X-Request-ID", reqId) + //expensive to do string len for every request, so use a flag + if ResourceCentric == true { + req.Header.Set("x-ms-AzureResourceId", ResourceID) + } + + resp, err := HTTPClient.Do(req) + elapsed = time.Since(start) - if err != nil { - message := fmt.Sprintf("Error when sending request %s \n", err.Error()) - Log(message) - // Commenting this out for now. TODO - Add better telemetry for ods errors using aggregation - //SendException(message) - Log("Failed to flush %d records after %s", len(dataItems), elapsed) + if err != nil { + message := fmt.Sprintf("Error when sending request %s \n", err.Error()) + Log(message) + // Commenting this out for now. 
TODO - Add better telemetry for ods errors using aggregation + //SendException(message) + + Log("Failed to flush %d records after %s", loglinesCount, elapsed) - return output.FLB_RETRY - } + return output.FLB_RETRY + } - if resp == nil || resp.StatusCode != 200 { - if resp != nil { - Log("RequestId %s Status %s Status Code %d", reqId, resp.Status, resp.StatusCode) - } - return output.FLB_RETRY + if resp == nil || resp.StatusCode != 200 { + if resp != nil { + Log("RequestId %s Status %s Status Code %d", reqId, resp.Status, resp.StatusCode) } + return output.FLB_RETRY + } - defer resp.Body.Close() - numContainerLogRecords = len(dataItems) - Log("PostDataHelper::Info::Successfully flushed %d container log records to ODS in %s", numContainerLogRecords, elapsed) + defer resp.Body.Close() + numContainerLogRecords = loglinesCount + Log("PostDataHelper::Info::Successfully flushed %d %s records to ODS in %s", numContainerLogRecords, recordType, elapsed) } - } ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() @@ -1805,10 +1463,22 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { CreateADXClient() } + ContainerLogSchemaVersion := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOG_SCHEMA_VERSION"))) + Log("AZMON_CONTAINER_LOG_SCHEMA_VERSION:%s", ContainerLogSchemaVersion) + + ContainerLogSchemaV2 = false //default is v1 schema + + if strings.Compare(ContainerLogSchemaVersion, ContainerLogV2SchemaVersion) == 0 && ContainerLogsRouteADX != true { + ContainerLogSchemaV2 = true + Log("Container logs schema=%s", ContainerLogV2SchemaVersion) + fmt.Fprintf(os.Stdout, "Container logs schema=%s... 
\n", ContainerLogV2SchemaVersion) + } + if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { populateExcludedStdoutNamespaces() populateExcludedStderrNamespaces() - if enrichContainerLogs == true && ContainerLogsRouteADX != true { + //enrichment not applicable for ADX and v2 schema + if enrichContainerLogs == true && ContainerLogsRouteADX != true && ContainerLogSchemaV2 != true { Log("ContainerLogEnrichment=true; starting goroutine to update containerimagenamemaps \n") go updateContainerImageNameMaps() } else { @@ -1821,4 +1491,4 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Running in replicaset. Disabling container enrichment caching & updates \n") } -} +} \ No newline at end of file From 564ad1b9060057d2401f9e31fa5698ae193ad186 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 10 Feb 2021 16:23:21 -0800 Subject: [PATCH 101/175] updating metric names --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 3b861997d..02196b05e 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -21,7 +21,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_active\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time\", \"envoy_http_downstream_rq_xx\", \"envoy_cluster_downstream_rq\", \"envoy_http_no_route\", \"envoy_server_live\", \"envoy_server_memory_heap_size\", \"envoy_server_memory_physical_size\", \"envoy_server_memory_allocated\", \"envoy_cluster_external_upstream_rq_xx\"]" +@fieldPassSetting = 
"[\"envoy_cluster_upstream_cx_total\", \"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time\"]" @scrapeInterval = "1m" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" From f262d49e210b84de1e20e552b82a60534320ba1a Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 10 Feb 2021 18:03:45 -0800 Subject: [PATCH 102/175] add forward slash to osm prometheus tag --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 02196b05e..774ec2b78 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -113,7 +113,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) namespace.strip! 
if namespace.length > 0 osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] - name_prefix=\"container.azm.ms.osm\" + name_prefix=\"container.azm.ms.osm/\" interval = \"#{@scrapeInterval}\" monitor_kubernetes_pods = true monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} From 5bfa4e46714a6ab561ee81bff5643cd21a912dd2 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 10 Feb 2021 18:35:56 -0800 Subject: [PATCH 103/175] fixing metric name for latency --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 774ec2b78..bdd989ac3 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -21,7 +21,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"envoy_cluster_upstream_cx_total\", \"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time\"]" +@fieldPassSetting = "[\"envoy_cluster_upstream_cx_total\", \"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_external_upstream_rq_time_bucket\"]" @scrapeInterval = "1m" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" From 256cfe9415f6d488e81d6625467c1f11a4821e00 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 11 Feb 2021 11:24:01 -0800 Subject: [PATCH 104/175] fixing metric name --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index bdd989ac3..3fdc9281b 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -21,7 +21,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"envoy_cluster_upstream_cx_total\", \"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_external_upstream_rq_time_bucket\"]" +@fieldPassSetting = "[\"envoy_cluster_upstream_cx_total\", \"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time_bucket\"]" @scrapeInterval = "1m" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" From 651ff8dd20ab86bbff1961b41534cb6dde6d931c Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 11 Feb 2021 14:39:43 -0800 Subject: [PATCH 105/175] moving config replace to its own method --- .../scripts/tomlparser-osm-config.rb | 115 +++++++++++------- 1 file changed, 73 insertions(+), 42 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 3fdc9281b..9967b55d6 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -81,6 +81,45 @@ def populateSettingValuesFromConfigMap(parsedConfig) end end +def replaceOsmTelegrafConfigPlaceHolders + begin + #replace place holders in configuration file + tgfConfig = File.read(@tgfTestConfigFile) #read returns only after closing the file + + if @osmMetricNamespaces.length > 0 + osmPluginConfigsWithNamespaces = "" + 
@osmMetricNamespaces.each do |namespace| + if !namespace.nil? + #Stripping namespaces to remove leading and trailing whitespaces + namespace.strip! + if namespace.length > 0 + osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] + name_prefix=\"container.azm.ms.osm/\" + interval = \"#{@scrapeInterval}\" + monitor_kubernetes_pods = true + monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} + monitor_kubernetes_pods_namespace = \"#{namespace}\" + fieldpass = #{@fieldPassSetting} + metric_version = #{@metricVersion} + url_tag = \"#{@urlTag}\" + bearer_token = \"#{@bearerToken}\" + response_timeout = \"#{@responseTimeout}\" + tls_ca = \"#{@tlsCa}\" + insecure_skip_verify = #{@insecureSkipVerify}\n" + end + end + end + tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) + else + puts "Using defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" + tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") + end + rescue => errorStr + # TODO: test this scenario out + puts "config::osm::error:Exception while replacing telegraf configuration settings for osm - #{errorStr}, using defaults" + end +end + @osmConfigSchemaVersion = ENV["AZMON_OSM_CFG_SCHEMA_VERSION"] puts "****************Start OSM Config Processing********************" if !@osmConfigSchemaVersion.nil? && !@osmConfigSchemaVersion.empty? && @osmConfigSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it @@ -102,51 +141,43 @@ def populateSettingValuesFromConfigMap(parsedConfig) FileUtils.cp(@tgfConfigFileSidecar, @tgfTestConfigFile) end -#replace place holders in configuration file -tgfConfig = File.read(@tgfTestConfigFile) #read returns only after closing the file - -if @osmMetricNamespaces.length > 0 - osmPluginConfigsWithNamespaces = "" - @osmMetricNamespaces.each do |namespace| - if !namespace.nil? 
- #Stripping namespaces to remove leading and trailing whitespaces - namespace.strip! - if namespace.length > 0 - osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] - name_prefix=\"container.azm.ms.osm/\" - interval = \"#{@scrapeInterval}\" - monitor_kubernetes_pods = true - monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} - monitor_kubernetes_pods_namespace = \"#{namespace}\" - fieldpass = #{@fieldPassSetting} - metric_version = #{@metricVersion} - url_tag = \"#{@urlTag}\" - bearer_token = \"#{@bearerToken}\" - response_timeout = \"#{@responseTimeout}\" - tls_ca = \"#{@tlsCa}\" - insecure_skip_verify = #{@insecureSkipVerify}\n" - end - end - end - tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) -else - puts "Using defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" - tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") -end +replaceOsmTelegrafConfigPlaceHolders() + +# #replace place holders in configuration file +# tgfConfig = File.read(@tgfTestConfigFile) #read returns only after closing the file + +# if @osmMetricNamespaces.length > 0 +# osmPluginConfigsWithNamespaces = "" +# @osmMetricNamespaces.each do |namespace| +# if !namespace.nil? +# #Stripping namespaces to remove leading and trailing whitespaces +# namespace.strip! 
+# if namespace.length > 0 +# osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] +# name_prefix=\"container.azm.ms.osm/\" +# interval = \"#{@scrapeInterval}\" +# monitor_kubernetes_pods = true +# monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} +# monitor_kubernetes_pods_namespace = \"#{namespace}\" +# fieldpass = #{@fieldPassSetting} +# metric_version = #{@metricVersion} +# url_tag = \"#{@urlTag}\" +# bearer_token = \"#{@bearerToken}\" +# response_timeout = \"#{@responseTimeout}\" +# tls_ca = \"#{@tlsCa}\" +# insecure_skip_verify = #{@insecureSkipVerify}\n" +# end +# end +# end +# tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) +# else +# puts "Using defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" +# tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") +# end File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfTestConfigFile} file in sidecar container" -# Set OSM namespaces as environment variable so that prometheus custom config parser can read it and add necessary fielddrops to avoid data duplication -# of OSM metrics -# promSettingsSharedfile = File.open("prom_config_shared_settings_env_var", "w") -# if !promSettingsSharedfile.nil? 
-# promSettingsSharedfile.write("export AZMON_OSM_METRIC_NAMESPACES=#{@osmMetricNamespaces}\n") -# # Close file after writing all environment variables -# promSettingsSharedfile.close -# puts "config::Successfully created prom_config_shared_settings_env_var file for prometheus sidecar" -# end - # Write the telemetry to file, so that they can be set as environment variables telemetryFile = File.open("integration_osm_config_env_var", "w") @@ -155,6 +186,6 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Close file after writing all environment variables telemetryFile.close else - puts "config::npm::Exception while opening file for writing OSM telemetry environment variables" + puts "config::osm::Exception while opening file for writing OSM telemetry environment variables" end puts "****************End OSM Config Processing********************" From 3700391b6e1a119af9758af05a8da03c09964e17 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 12 Feb 2021 11:44:16 -0800 Subject: [PATCH 106/175] adding bytes metrics --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 9967b55d6..14cf360ef 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -21,7 +21,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"envoy_cluster_upstream_cx_total\", \"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time_bucket\"]" +@fieldPassSetting = "[\"envoy_cluster_upstream_cx_total\", \"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", 
\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time_bucket\", \"envoy_cluster_upstream_cx_rx_bytes_total\", \"envoy_cluster_upstream_cx_tx_bytes_total\"]" @scrapeInterval = "1m" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" From de25b22e3b807fa3008bacda4a4463966945710d Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 12 Feb 2021 12:05:55 -0800 Subject: [PATCH 107/175] bug fix --- build/linux/installer/scripts/tomlparser-osm-config.rb | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 14cf360ef..bd5322e70 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -114,6 +114,8 @@ def replaceOsmTelegrafConfigPlaceHolders puts "Using defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") end + File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope + puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfTestConfigFile} file in sidecar container" rescue => errorStr # TODO: test this scenario out puts "config::osm::error:Exception while replacing telegraf configuration settings for osm - #{errorStr}, using defaults" @@ -175,8 +177,8 @@ def replaceOsmTelegrafConfigPlaceHolders # tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") # end -File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope -puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfTestConfigFile} file in sidecar container" +# File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } 
# 'file' will be closed here after it goes out of scope +# puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfTestConfigFile} file in sidecar container" # Write the telemetry to file, so that they can be set as environment variables telemetryFile = File.open("integration_osm_config_env_var", "w") From 4d880b2cd043b6d07192d78a88d3cc0d92130161 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 12 Feb 2021 19:12:13 -0800 Subject: [PATCH 108/175] telemetry changes --- .../scripts/tomlparser-prom-customconfig.rb | 3 - source/plugins/go/src/telemetry.go | 140 +++++++++++++----- source/plugins/ruby/in_kube_nodes.rb | 4 - 3 files changed, 105 insertions(+), 42 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 204235ebb..0dee40b03 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -273,10 +273,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) #Set environment variables for telemetry file = File.open("telemetry_prom_config_env_var", "w") if !file.nil? 
- file.write("export TELEMETRY_SIDECAR_PROM_INTERVAL=\"#{interval}\"\n") #Setting array lengths as environment variables for telemetry purposes - file.write("export TELEMETRY_SIDECAR_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") - file.write("export TELEMETRY_SIDECAR_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectors.length}\"\n") diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 3d30ac5aa..84250fef1 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -10,9 +10,9 @@ import ( "strings" "time" + "github.com/fluent/fluent-bit-go/output" "github.com/microsoft/ApplicationInsights-Go/appinsights" "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" - "github.com/fluent/fluent-bit-go/output" ) var ( @@ -44,33 +44,44 @@ var ( ContainerLogsMDSDClientCreateErrors float64 //Tracks the number of write/send errors to ADX for containerlogs (uses ContainerLogTelemetryTicker) ContainerLogsSendErrorsToADXFromFluent float64 - //Tracks the number of ADX client create errors for containerlogs (uses ContainerLogTelemetryTicker) + //Tracks the number of ADX client create errors for containerlogs (uses ContainerLogTelemetryTicker) ContainerLogsADXClientCreateErrors float64 + //Tracks the number of OSM namespaces and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + OSMNamespaceCount int64 + //Tracks whether monitor kubernetes pods is set to true and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPods string + //Tracks the number of monitor kubernetes pods namespaces and sent only from prometheus sidecar (uses 
ContainerLogTelemetryTicker) + PromMonitorPodsNamespaceLength int64 + //Tracks the number of monitor kubernetes pods label selectors and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPodsLabelSelectorLength int64 + //Tracks the number of monitor kubernetes pods field selectors and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPodsFieldSelectorLength int64 ) const ( - clusterTypeACS = "ACS" - clusterTypeAKS = "AKS" - envAKSResourceID = "AKS_RESOURCE_ID" - envACSResourceName = "ACS_RESOURCE_NAME" - envAppInsightsAuth = "APPLICATIONINSIGHTS_AUTH" - envAppInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT" - metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" - metricNameAvgLogGenerationRate = "ContainerLogsGeneratedPerSec" - metricNameLogSize = "ContainerLogsSize" - metricNameAgentLogProcessingMaxLatencyMs = "ContainerLogsAgentSideLatencyMs" - metricNameNumberofTelegrafMetricsSentSuccessfully = "TelegrafMetricsSentCount" - metricNameNumberofSendErrorsTelegrafMetrics = "TelegrafMetricsSendErrorCount" - metricNameNumberofSend429ErrorsTelegrafMetrics = "TelegrafMetricsSend429ErrorCount" - metricNameErrorCountContainerLogsSendErrorsToMDSDFromFluent = "ContainerLogs2MdsdSendErrorCount" - metricNameErrorCountContainerLogsMDSDClientCreateError = "ContainerLogsMdsdClientCreateErrorCount" - metricNameErrorCountContainerLogsSendErrorsToADXFromFluent = "ContainerLogs2ADXSendErrorCount" - metricNameErrorCountContainerLogsADXClientCreateError = "ContainerLogsADXClientCreateErrorCount" + clusterTypeACS = "ACS" + clusterTypeAKS = "AKS" + envAKSResourceID = "AKS_RESOURCE_ID" + envACSResourceName = "ACS_RESOURCE_NAME" + envAppInsightsAuth = "APPLICATIONINSIGHTS_AUTH" + envAppInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT" + metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" + metricNameAvgLogGenerationRate = "ContainerLogsGeneratedPerSec" + metricNameLogSize = "ContainerLogsSize" + 
metricNameAgentLogProcessingMaxLatencyMs = "ContainerLogsAgentSideLatencyMs" + metricNameNumberofTelegrafMetricsSentSuccessfully = "TelegrafMetricsSentCount" + metricNameNumberofSendErrorsTelegrafMetrics = "TelegrafMetricsSendErrorCount" + metricNameNumberofSend429ErrorsTelegrafMetrics = "TelegrafMetricsSend429ErrorCount" + metricNameErrorCountContainerLogsSendErrorsToMDSDFromFluent = "ContainerLogs2MdsdSendErrorCount" + metricNameErrorCountContainerLogsMDSDClientCreateError = "ContainerLogsMdsdClientCreateErrorCount" + metricNameErrorCountContainerLogsSendErrorsToADXFromFluent = "ContainerLogs2ADXSendErrorCount" + metricNameErrorCountContainerLogsADXClientCreateError = "ContainerLogsADXClientCreateErrorCount" defaultTelemetryPushIntervalSeconds = 300 - eventNameContainerLogInit = "ContainerLogPluginInitialized" - eventNameDaemonSetHeartbeat = "ContainerLogDaemonSetHeartbeatEvent" + eventNameContainerLogInit = "ContainerLogPluginInitialized" + eventNameDaemonSetHeartbeat = "ContainerLogDaemonSetHeartbeatEvent" + eventNamePrometheusSidecarHeartbeat = "PrometheusSidecarHeartbeatEvent" ) // SendContainerLogPluginMetrics is a go-routine that flushes the data periodically (every 5 mins to App Insights) @@ -100,6 +111,11 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { containerLogsMDSDClientCreateErrors := ContainerLogsMDSDClientCreateErrors containerLogsSendErrorsToADXFromFluent := ContainerLogsSendErrorsToADXFromFluent containerLogsADXClientCreateErrors := ContainerLogsADXClientCreateErrors + osmNamespaceCount := OSMNamespaceCount + promMonitorPods := PromMonitorPods + promMonitorPodsNamespaceLength := PromMonitorPodsNamespaceLength + promMonitorPodsLabelSelectorLength := PromMonitorPodsLabelSelectorLength + promMonitorPodsFieldSelectorLength := PromMonitorPodsFieldSelectorLength TelegrafMetricsSentCount = 0.0 TelegrafMetricsSendErrorCount = 0.0 @@ -118,17 +134,42 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) 
{ ContainerLogTelemetryMutex.Unlock() if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { - SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) - flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) - TelemetryClient.Track(flushRateMetric) - logRateMetric := appinsights.NewMetricTelemetry(metricNameAvgLogGenerationRate, logRate) - logSizeMetric := appinsights.NewMetricTelemetry(metricNameLogSize, logSizeRate) - TelemetryClient.Track(logRateMetric) - Log("Log Size Rate: %f\n", logSizeRate) - TelemetryClient.Track(logSizeMetric) - logLatencyMetric := appinsights.NewMetricTelemetry(metricNameAgentLogProcessingMaxLatencyMs, logLatencyMs) - logLatencyMetric.Properties["Container"] = logLatencyMsContainer - TelemetryClient.Track(logLatencyMetric) + if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheus-sidecar") == 0 { + telemetryDimensions := make(map[string]string) + telemetryDimensions["ContainerType"] = "prometheus-sidecar" + telemetryDimensions["SidecarPromMonitorPods"] = PromMonitorPods + if (PromMonitorPodsNamespaceLength > 0) + { + telemetryDimensions["SidecarPromMonitorPodsNamespaceLength"] = PromMonitorPodsNamespaceLength + } + if (promMonitorPodsLabelSelectorLength > 0) + { + telemetryDimensions["SidecarPromMonitorPodsLabelSelectorLength"] = promMonitorPodsLabelSelectorLength + } + if (promMonitorPodsFieldSelectorLength > 0) + { + telemetryDimensions["SidecarPromMonitorPodsFieldSelectorLength"] = promMonitorPodsFieldSelectorLength + } + if (osmNamespaceCount > 0) + { + telemetryDimensions["OsmNamespaceCount"] = osmNamespaceCount + } + + SendEvent(eventNamePrometheusSidecarHeartbeat, telemetryDimensions) + + } else { + SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) + flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) + TelemetryClient.Track(flushRateMetric) + logRateMetric := 
appinsights.NewMetricTelemetry(metricNameAvgLogGenerationRate, logRate) + logSizeMetric := appinsights.NewMetricTelemetry(metricNameLogSize, logSizeRate) + TelemetryClient.Track(logRateMetric) + Log("Log Size Rate: %f\n", logSizeRate) + TelemetryClient.Track(logSizeMetric) + logLatencyMetric := appinsights.NewMetricTelemetry(metricNameAgentLogProcessingMaxLatencyMs, logLatencyMs) + logLatencyMetric.Properties["Container"] = logLatencyMsContainer + TelemetryClient.Track(logLatencyMetric) + } } TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameNumberofTelegrafMetricsSentSuccessfully, telegrafMetricsSentCount)) if telegrafMetricsSendErrorCount > 0.0 { @@ -255,12 +296,41 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { } if isProxyConfigured == true { - CommonProperties["IsProxyConfigured"] = "true" + CommonProperties["IsProxyConfigured"] = "true" } else { - CommonProperties["IsProxyConfigured"] = "false" - } + CommonProperties["IsProxyConfigured"] = "false" + } TelemetryClient.Context().CommonProperties = CommonProperties + + // Getting the namespace count, monitor kubernetes pods values and namespace count once at start because it wont change unless the configmap is applied and the container is restarted + osmNsCount, err := os.Getenv("TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT") + OSMNamespaceCount, err = strconv.Atoi(osmNsCount) + if err != nil { + Log("OSM namespace count string to int conversion error %s", err.Error()) + OSMNamespaceCount = 0 + } + PromMonitorPods = os.Getenv("TELEMETRY_SIDECAR_PROM_MONITOR_PODS") + promMonPodsNamespaceLength := os.Getenv("TELEMETRY_SIDECAR_PROM_MONITOR_PODS_NS_LENGTH") + PromMonitorPodsNamespaceLength, err = strconv.Atoi(promMonPodsNamespaceLength) + if err != nil { + Log("Prometheus sidecar monitor kubernetes pods namespace count string to int conversion error %s", err.Error()) + PromMonitorPodsNamespaceLength = 0 + } + promLabelSelectorLength := 
os.Getenv("TELEMETRY_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH") + PromMonitorPodsLabelSelectorLength, err = strconv.Atoi(promLabelSelectorLength) + if err != nil { + Log("Prometheus sidecar label selector count string to int conversion error %s", err.Error()) + PromMonitorPodsLabelSelectorLength = 0 + } + + promFieldSelectorLength := os.Getenv("TELEMETRY_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH") + PromMonitorPodsFieldSelectorLength, err = strconv.Atoi(promFieldSelectorLength) + if err != nil { + Log("Prometheus sidecar field selector count string to int conversion error %s", err.Error()) + PromMonitorPodsFieldSelectorLength = 0 + } + return 0, nil } diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 0a4727077..4af9a2d20 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -17,8 +17,6 @@ class Kube_nodeInventory_Input < Input @@rsPromFieldDropCount = ENV["TELEMETRY_RS_PROM_FIELDDROP_LENGTH"] @@rsPromK8sServiceCount = ENV["TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH"] @@rsPromUrlCount = ENV["TELEMETRY_RS_PROM_URLS_LENGTH"] - @@rsPromMonitorPods = ENV["TELEMETRY_RS_PROM_MONITOR_PODS"] - @@rsPromMonitorPodsNamespaceLength = ENV["TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH"] @@collectAllKubeEvents = ENV["AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS"] def initialize @@ -285,8 +283,6 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) properties["rsPromFDC"] = @@rsPromFieldDropCount properties["rsPromServ"] = @@rsPromK8sServiceCount properties["rsPromUrl"] = @@rsPromUrlCount - properties["rsPromMonPods"] = @@rsPromMonitorPods - properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength end ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties) telemetrySent = true From 357281c7a4ea79cac83ae9e5a57a281eaf605356 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 12 Feb 2021 19:14:45 -0800 Subject: 
[PATCH 109/175] fixing formatting --- source/plugins/go/src/telemetry.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 84250fef1..7861a22f1 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -138,24 +138,20 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { telemetryDimensions := make(map[string]string) telemetryDimensions["ContainerType"] = "prometheus-sidecar" telemetryDimensions["SidecarPromMonitorPods"] = PromMonitorPods - if (PromMonitorPodsNamespaceLength > 0) - { - telemetryDimensions["SidecarPromMonitorPodsNamespaceLength"] = PromMonitorPodsNamespaceLength + if promMonitorPodsNamespaceLength > 0 { + telemetryDimensions["SidecarPromMonitorPodsNamespaceLength"] = promMonitorPodsNamespaceLength } - if (promMonitorPodsLabelSelectorLength > 0) - { + if promMonitorPodsLabelSelectorLength > 0 { telemetryDimensions["SidecarPromMonitorPodsLabelSelectorLength"] = promMonitorPodsLabelSelectorLength } - if (promMonitorPodsFieldSelectorLength > 0) - { + if promMonitorPodsFieldSelectorLength > 0 { telemetryDimensions["SidecarPromMonitorPodsFieldSelectorLength"] = promMonitorPodsFieldSelectorLength } - if (osmNamespaceCount > 0) - { + if osmNamespaceCount > 0 { telemetryDimensions["OsmNamespaceCount"] = osmNamespaceCount } - SendEvent(eventNamePrometheusSidecarHeartbeat, telemetryDimensions) + SendEvent(eventNamePrometheusSidecarHeartbeat, telemetryDimensions) } else { SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) @@ -330,7 +326,7 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { Log("Prometheus sidecar field selector count string to int conversion error %s", err.Error()) PromMonitorPodsFieldSelectorLength = 0 } - + return 0, nil } From 7e1b1480166b0070c31cf853be3c94d9496dfee3 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 12 Feb 
2021 19:16:37 -0800 Subject: [PATCH 110/175] integer to string --- source/plugins/go/src/telemetry.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 7861a22f1..86db7782d 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -139,16 +139,16 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { telemetryDimensions["ContainerType"] = "prometheus-sidecar" telemetryDimensions["SidecarPromMonitorPods"] = PromMonitorPods if promMonitorPodsNamespaceLength > 0 { - telemetryDimensions["SidecarPromMonitorPodsNamespaceLength"] = promMonitorPodsNamespaceLength + telemetryDimensions["SidecarPromMonitorPodsNamespaceLength"] = strconv.Itoa(promMonitorPodsNamespaceLength) } if promMonitorPodsLabelSelectorLength > 0 { - telemetryDimensions["SidecarPromMonitorPodsLabelSelectorLength"] = promMonitorPodsLabelSelectorLength + telemetryDimensions["SidecarPromMonitorPodsLabelSelectorLength"] = strconv.Itoa(promMonitorPodsLabelSelectorLength) } if promMonitorPodsFieldSelectorLength > 0 { - telemetryDimensions["SidecarPromMonitorPodsFieldSelectorLength"] = promMonitorPodsFieldSelectorLength + telemetryDimensions["SidecarPromMonitorPodsFieldSelectorLength"] = strconv.Itoa(promMonitorPodsFieldSelectorLength) } if osmNamespaceCount > 0 { - telemetryDimensions["OsmNamespaceCount"] = osmNamespaceCount + telemetryDimensions["OsmNamespaceCount"] = strconv.Itoa(osmNamespaceCount) } SendEvent(eventNamePrometheusSidecarHeartbeat, telemetryDimensions) From b517f26403f4f5bc967293f53c7950ff8c72c068 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 12 Feb 2021 19:17:44 -0800 Subject: [PATCH 111/175] change to integer --- source/plugins/go/src/telemetry.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 
86db7782d..9110b15e4 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -47,15 +47,15 @@ var ( //Tracks the number of ADX client create errors for containerlogs (uses ContainerLogTelemetryTicker) ContainerLogsADXClientCreateErrors float64 //Tracks the number of OSM namespaces and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) - OSMNamespaceCount int64 + OSMNamespaceCount int //Tracks whether monitor kubernetes pods is set to true and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) PromMonitorPods string //Tracks the number of monitor kubernetes pods namespaces and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) - PromMonitorPodsNamespaceLength int64 + PromMonitorPodsNamespaceLength int //Tracks the number of monitor kubernetes pods label selectors and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) - PromMonitorPodsLabelSelectorLength int64 + PromMonitorPodsLabelSelectorLength int //Tracks the number of monitor kubernetes pods field selectors and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) - PromMonitorPodsFieldSelectorLength int64 + PromMonitorPodsFieldSelectorLength int ) const ( From b3a82d9a4ed29a44404a1d85076c2d77ddff1b64 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 12 Feb 2021 19:18:31 -0800 Subject: [PATCH 112/175] bug fix --- source/plugins/go/src/telemetry.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 9110b15e4..ac7980e58 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -300,7 +300,7 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { TelemetryClient.Context().CommonProperties = CommonProperties // Getting the namespace count, monitor kubernetes pods values and namespace count once at start because it wont change unless the 
configmap is applied and the container is restarted - osmNsCount, err := os.Getenv("TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT") + osmNsCount := os.Getenv("TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT") OSMNamespaceCount, err = strconv.Atoi(osmNsCount) if err != nil { Log("OSM namespace count string to int conversion error %s", err.Error()) From 76625a00852c22166cf6c620c5b24fd2b0472e65 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 12 Feb 2021 19:19:15 -0800 Subject: [PATCH 113/175] bug fix --- source/plugins/go/src/telemetry.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index ac7980e58..644cbe45a 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -137,7 +137,7 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheus-sidecar") == 0 { telemetryDimensions := make(map[string]string) telemetryDimensions["ContainerType"] = "prometheus-sidecar" - telemetryDimensions["SidecarPromMonitorPods"] = PromMonitorPods + telemetryDimensions["SidecarPromMonitorPods"] = promMonitorPods if promMonitorPodsNamespaceLength > 0 { telemetryDimensions["SidecarPromMonitorPodsNamespaceLength"] = strconv.Itoa(promMonitorPodsNamespaceLength) } From 21ae27069704e3d7cdf130a5e3d777fa35e9d659 Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Wed, 17 Feb 2021 17:01:33 -0800 Subject: [PATCH 114/175] fix field and label selector telemetry --- .../linux/installer/scripts/tomlparser-prom-customconfig.rb | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 0dee40b03..879f3d459 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ 
b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -267,6 +267,8 @@ def populateSettingValuesFromConfigMap(parsedConfig) new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) monitorKubernetesPodsNamespacesLength = 0 end + kubernetesLabelSelectorsLength = kubernetesLabelSelectors.split(/,\s*(?=[^()]*(?:\(|$))/).length + kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(',').length File.open(file_name, "w") { |file| file.puts new_contents } puts "config::Successfully substituted the placeholders in telegraf conf file for prometheus side car" @@ -276,8 +278,8 @@ def populateSettingValuesFromConfigMap(parsedConfig) #Setting array lengths as environment variables for telemetry purposes file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") - file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectors.length}\"\n") - file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectors.length}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") # Close file after writing all environment variables file.close From fd3f6953e92af638a8f18f68fe96fa2fdab13aab Mon Sep 17 00:00:00 2001 From: Grace Wehner Date: Thu, 18 Feb 2021 16:36:08 -0800 Subject: [PATCH 115/175] Add comments to clarify label/field selector telemetry --- build/linux/installer/scripts/tomlparser-prom-customconfig.rb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb 
b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 879f3d459..0666b1300 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -267,6 +267,9 @@ def populateSettingValuesFromConfigMap(parsedConfig) new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) monitorKubernetesPodsNamespacesLength = 0 end + + # Label and field selectors are passed as strings. For field selectors, split by commas to get the number of key-value pairs. + # Label selectors can be formatted as "app in (app1, app2, app3)", so split by commas only outside parentheses to get the number of key-value pairs. kubernetesLabelSelectorsLength = kubernetesLabelSelectors.split(/,\s*(?=[^()]*(?:\(|$))/).length kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(',').length From 87fd6d89cc141651f5b8e4df658a1803685e0a0b Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 23 Feb 2021 10:39:57 -0800 Subject: [PATCH 116/175] changes to install telegraf on windows container --- build/windows/installer/conf/telegraf.conf | 846 +++++++++++++++++++++ kubernetes/windows/main.ps1 | 53 +- kubernetes/windows/setup.ps1 | 16 + 3 files changed, 889 insertions(+), 26 deletions(-) create mode 100644 build/windows/installer/conf/telegraf.conf diff --git a/build/windows/installer/conf/telegraf.conf b/build/windows/installer/conf/telegraf.conf new file mode 100644 index 000000000..809f3538e --- /dev/null +++ b/build/windows/installer/conf/telegraf.conf @@ -0,0 +1,846 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. 
+# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + + +# Global tags can be specified here in key="value" format. +[global_tags] + #Below are entirely used for telemetry + #AgentVersion = "$AGENT_VERSION" + #AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" + #ACS_RESOURCE_NAME = "$TELEMETRY_ACS_RESOURCE_NAME" + #Region = "$TELEMETRY_AKS_REGION" + #ClusterName = "$TELEMETRY_CLUSTER_NAME" + #ClusterType = "$TELEMETRY_CLUSTER_TYPE" + #Computer = "placeholder_hostname" + #ControllerType = "$CONTROLLER_TYPE" + + hostName = "placeholder_hostname" + + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "60s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. 
You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "15s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. + debug = true + ## Run telegraf in quiet mode (error log messages only). + quiet = false + ## Specify the log file name. The empty string means to log to stderr. + logfile = "/opt/new-telegraf-logs.txt" + ## Override default hostname, if empty use os.Hostname() + #hostname = "placeholder_hostname" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = true + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +# Generic socket writer capable of handling multiple socket types. 
+[[outputs.socket_writer]] + ## URL to connect to + address = "tcp://0.0.0.0:25229" + # address = "tcp://example.com:http" + # address = "tcp4://127.0.0.1:8094" + # address = "tcp6://127.0.0.1:8094" + # address = "tcp6://[2001:db8::1]:8094" + # address = "udp://127.0.0.1:8094" + # address = "udp4://127.0.0.1:8094" + # address = "udp6://127.0.0.1:8094" + # address = "unix:///tmp/telegraf.sock" + # address = "unixgram:///tmp/telegraf.sock" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. + ## Only applies to TCP sockets. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" + + ## Data format to generate. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "json" + namedrop = ["agent_telemetry"] + #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] + +# # Output to send MDM metrics to fluent bit and then route it to fluentD +# [[outputs.socket_writer]] +# ## URL to connect to +# address = "tcp://0.0.0.0:25228" +# # address = "tcp://example.com:http" +# # address = "tcp4://127.0.0.1:8094" +# # address = "tcp6://127.0.0.1:8094" +# # address = "tcp6://[2001:db8::1]:8094" +# # address = "udp://127.0.0.1:8094" +# # address = "udp4://127.0.0.1:8094" +# # address = "udp6://127.0.0.1:8094" +# # address = "unix:///tmp/telegraf.sock" +# # address = "unixgram:///tmp/telegraf.sock" + +# ## Optional TLS Config +# # tls_ca = "/etc/telegraf/ca.pem" +# # tls_cert = "/etc/telegraf/cert.pem" +# # tls_key = "/etc/telegraf/key.pem" +# ## Use TLS but skip chain & host verification +# # 
insecure_skip_verify = false + +# ## Period between keep alive probes. +# ## Only applies to TCP sockets. +# ## 0 disables keep alive probes. +# ## Defaults to the OS configuration. +# # keep_alive_period = "5m" + +# ## Data format to generate. +# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "json" +# namepass = ["container.azm.ms/disk"] +# #fieldpass = ["used_percent"] + +[[outputs.application_insights]] + ## Instrumentation key of the Application Insights resource. + instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY" + + ## Timeout for closing (default: 5s). + # timeout = "5s" + + ## Enable additional diagnostic logging. + # enable_diagnostic_logging = false + + ## Context Tag Sources add Application Insights context tags to a tag value. + ## + ## For list of allowed context tag keys see: + ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go + # [outputs.application_insights.context_tag_sources] + # "ai.cloud.role" = "kubernetes_container_name" + # "ai.cloud.roleInstance" = "kubernetes_pod_name" + namepass = ["agent_telemetry"] + #tagdrop = ["nodeName"] + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + +[[processors.converter]] + [processors.converter.fields] + float = ["*"] +# # Perform string processing on tags, fields, and measurements +#[[processors.rename]] + #[[processors.rename.replace]] + # measurement = "disk" + # dest = "nodes" +# [[processors.rename.replace]] +# field = "free" +# dest = "freeBytes" +# [[processors.rename.replace]] +# field = "used" +# dest = "usedBytes" +# [[processors.rename.replace]] +# field = "used_percent" +# dest = "usedPercentage" + #[[processors.rename.replace]] + # 
measurement = "net" + # dest = "nodes" + #[[processors.rename.replace]] + # field = "bytes_recv" + # dest = "networkBytesReceivedTotal" + #[[processors.rename.replace]] + # field = "bytes_sent" + # dest = "networkBytesSentTotal" + #[[processors.rename.replace]] + # field = "err_in" + # dest = "networkErrorsInTotal" + #[[processors.rename.replace]] + # field = "err_out" + # dest = "networkErrorsOutTotal" + #[[processors.rename.replace]] + # measurement = "kubernetes_pod_volume" + # dest = "pods" + #[[processors.rename.replace]] + # field = "used_bytes" + # dest = "podVolumeUsedBytes" + #[[processors.rename.replace]] + # field = "available_bytes" + # dest = "podVolumeAvailableBytes" + #[[processors.rename.replace]] + # measurement = "kubernetes_pod_network" + # dest = "pods" + #[[processors.rename.replace]] + # field = "tx_errors" + # dest = "podNetworkTxErrorsTotal" + #[[processors.rename.replace]] + # field = "rx_errors" + # dest = "podNetworkRxErrorsTotal" + #[[processors.rename.replace]] + # tag = "volume_name" + # dest = "volumeName" + #[[processors.rename.replace]] + # tag = "pod_name" + # dest = "podName" + #[[processors.rename.replace]] + # measurement = "docker" + # dest = "containers" + #[[processors.rename.replace]] + # measurement = "docker_container_status" + # dest = "containers" + #[[processors.rename.replace]] + # field = "n_containers" + # dest = "numContainers" + #[[processors.rename.replace]] + # field = "n_containers_running" + # dest = "numContainersRunning" + #[[processors.rename.replace]] + # field = "n_containers_stopped" + # dest = "numContainersStopped" + #[[processors.rename.replace]] + # field = "n_containers_paused" + # dest = "numContainersPaused" + #[[processors.rename.replace]] + # field = "n_images" + # dest = "numContainerImages" + +# ## Convert a tag value to uppercase +# # [[processors.strings.uppercase]] +# # tag = "method" +# +# ## Convert a field value to lowercase and store in a new field +# # [[processors.strings.lowercase]] 
+# # field = "uri_stem" +# # dest = "uri_stem_normalised" +# +# ## Trim leading and trailing whitespace using the default cutset +# # [[processors.strings.trim]] +# # field = "message" +# +# ## Trim leading characters in cutset +# # [[processors.strings.trim_left]] +# # field = "message" +# # cutset = "\t" +# +# ## Trim trailing characters in cutset +# # [[processors.strings.trim_right]] +# # field = "message" +# # cutset = "\r\n" +# +# ## Trim the given prefix from the field +# # [[processors.strings.trim_prefix]] +# # field = "my_value" +# # prefix = "my_" +# +# ## Trim the given suffix from the field +# # [[processors.strings.trim_suffix]] +# # field = "read_count" +# # suffix = "_count" + + +# # Print all metrics that pass through this filter. +# [[processors.topk]] +# ## How many seconds between aggregations +# # period = 10 +# +# ## How many top metrics to return +# # k = 10 +# +# ## Over which tags should the aggregation be done. Globs can be specified, in +# ## which case any tag matching the glob will aggregated over. If set to an +# ## empty list is no aggregation over tags is done +# # group_by = ['*'] +# +# ## Over which fields are the top k are calculated +# # fields = ["value"] +# +# ## What aggregation to use. Options: sum, mean, min, max +# # aggregation = "mean" +# +# ## Instead of the top k largest metrics, return the bottom k lowest metrics +# # bottomk = false +# +# ## The plugin assigns each metric a GroupBy tag generated from its name and +# ## tags. If this setting is different than "" the plugin will add a +# ## tag (which name will be the value of this setting) to each metric with +# ## the value of the calculated GroupBy tag. Useful for debugging +# # add_groupby_tag = "" +# +# ## These settings provide a way to know the position of each metric in +# ## the top k. The 'add_rank_field' setting allows to specify for which +# ## fields the position is required. 
If the list is non empty, then a field +# ## will be added to each and every metric for each string present in this +# ## setting. This field will contain the ranking of the group that +# ## the metric belonged to when aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_rank' +# # add_rank_fields = [] +# +# ## These settings provide a way to know what values the plugin is generating +# ## when aggregating metrics. The 'add_agregate_field' setting allows to +# ## specify for which fields the final aggregation value is required. If the +# ## list is non empty, then a field will be added to each every metric for +# ## each field present in this setting. This field will contain +# ## the computed aggregation for the group that the metric belonged to when +# ## aggregated over that field. +# ## The name of the field will be set to the name of the aggregation field, +# ## suffixed with the string '_topk_aggregate' +# # add_aggregate_fields = [] + + + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + +# # Keep the aggregate basicstats of each metric passing through. +# [[aggregators.basicstats]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false + + +# # Create aggregate histograms. +# [[aggregators.histogram]] +# ## The period in which to flush the aggregator. +# period = "30s" +# +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# +# ## Example config that aggregates all fields of the metric. 
+# # [[aggregators.histogram.config]] +# # ## The set of buckets. +# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] +# # ## The name of metric. +# # measurement_name = "cpu" +# +# ## Example config that aggregates only specific fields of the metric. +# # [[aggregators.histogram.config]] +# # ## The set of buckets. +# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] +# # ## The name of metric. +# # measurement_name = "diskio" +# # ## The concrete fields of metric +# # fields = ["io_time", "read_time", "write_time"] + + +# # Keep the aggregate min/max of each metric passing through. +# [[aggregators.minmax]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false + + +# # Count the occurance of values in fields. +# [[aggregators.valuecounter]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false +# ## The fields for which the values will be counted +# fields = [] + + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + +# Read metrics about cpu usage +#[[inputs.cpu]] + ## Whether to report per-cpu stats or not +# percpu = false + ## Whether to report total system cpu stats or not +# totalcpu = true + ## If true, collect raw CPU time metrics. +# collect_cpu_time = false + ## If true, compute and report the sum of all non-idle CPU states. 
+# report_active = true +# fieldpass = ["usage_active","cluster","node","host","device"] +# taginclude = ["cluster","cpu","node"] + + + +# Read metrics about disk usage by mount point +# [[inputs.disk]] +# name_prefix="container.azm.ms/" +# ## By default stats will be gathered for all mount points. +# ## Set mount_points will restrict the stats to only the specified mount points. +# # mount_points = ["/"] + +# ## Ignore mount points by filesystem type. +# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] +# fieldpass = ["free", "used", "used_percent"] +# taginclude = ["device","path","hostName"] +# # Below due to Bug - https://github.com/influxdata/telegraf/issues/5615 +# # ORDER matters here!! - i.e the below should be the LAST modifier +# [inputs.disk.tagdrop] +# path = ["/var/lib/kubelet*", "/dev/termination-log", "/var/log", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/etc/kubernetes/host", "/var/lib/docker/containers", "/etc/config/settings"] + + +# # Read metrics about memory usage +# #[[inputs.mem]] +# # fieldpass = ["used_percent", "cluster", "node","host","device"] +# # taginclude = ["cluster","node"] + +# # Read metrics about disk IO by device +# [[inputs.diskio]] +# name_prefix="container.azm.ms/" +# ## By default, telegraf will gather stats for all devices including +# ## disk partitions. +# ## Setting devices will restrict the stats to the specified devices. +# devices = ["sd[a-z][0-9]"] +# ## Uncomment the following line if you need disk serial numbers. +# # skip_serial_number = false +# # +# ## On systems which support it, device metadata can be added in the form of +# ## tags. +# ## Currently only Linux is supported via udev properties. You can view +# ## available properties for a device by running: +# ## 'udevadm info -q property -n /dev/sda' +# ## Note: Most, but not all, udev properties can be accessed this way. Properties +# ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. 
+# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] +# # +# ## Using the same metadata source as device_tags, you can also customize the +# ## name of the device via templates. +# ## The 'name_templates' parameter is a list of templates to try and apply to +# ## the device. The template may contain variables in the form of '$PROPERTY' or +# ## '${PROPERTY}'. The first template which does not contain any variables not +# ## present for the device is used as the device name tag. +# ## The typical use case is for LVM volumes, to get the VG/LV name instead of +# ## the near-meaningless DM-0 name. +# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] +# fieldpass = ["reads", "read_bytes", "read_time", "writes", "write_bytes", "write_time", "io_time", "iops_in_progress"] +# taginclude = ["name","hostName"] + +# # Read metrics about network interface usage +# [[inputs.net]] +# name_prefix="container.azm.ms/" +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. +# ## +# # interfaces = ["eth0"] +# ## +# ## On linux systems telegraf also collects protocol stats. +# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. 
+# ## +# ignore_protocol_stats = true +# ## +# fieldpass = ["bytes_recv", "bytes_sent", "err_in", "err_out"] +# taginclude = ["interface","hostName"] + +# Read metrics from the kubernetes kubelet api +#[[inputs.kubernetes]] + ## URL for the kubelet + #url = "http://1.1.1.1:10255" +# url = "http://placeholder_nodeip:10255" + + ## Use bearer token for authorization + # bearer_token = /path/to/bearer/token + + ## Set response_timeout (default 5 seconds) + # response_timeout = "5s" + + ## Optional TLS Config + # tls_ca = /path/to/cafile + # tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +# fieldpass = ["used_bytes", "available_bytes", "tx_errors", "rx_errors" ] +# taginclude = ["volume_name","nodeName","namespace","pod_name"] +# Read metrics about docker containers +#[[inputs.docker]] + ## Docker Endpoint + ## To use TCP, set endpoint = "tcp://[ip]:[port]" + ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/host/docker.sock" + + ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) +# gather_services = false + + ## Only collect metrics for these containers, collect all if empty +# container_names = [] + + ## Containers to include and exclude. Globs accepted. + ## Note that an empty array for both will include all containers +# container_name_include = [] +# container_name_exclude = [] + + ## Container states to include and exclude. Globs accepted. + ## When empty only containers in the "running" state will be captured. +# container_state_include = ['*'] + # container_state_exclude = [] + + ## Timeout for docker list, info, and stats commands +# timeout = "5s" + + ## Whether to report for each container per-device blkio (8:0, 8:1...) and + ## network (eth0, eth1, ...) 
stats or not +# perdevice = true + ## Whether to report for each container total blkio and network stats or not +# total = true + ## Which environment variables should we use as a tag + ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] + + ## docker labels to include and exclude as tags. Globs accepted. + ## Note that an empty array for both will include all labels as tags +# docker_label_include = [] +# docker_label_exclude = [] + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false +# fieldpass = ["n_containers", "n_containers_running", "n_containers_stopped", "n_containers_paused", "n_images"] + #fieldpass = ["numContainers", "numContainersRunning", "numContainersStopped", "numContainersPaused", "numContainerImages"] +# taginclude = ["nodeName"] + +#[[inputs.procstat]] +# #name_prefix="t.azm.ms/" +# exe = "mdsd" +# interval = "10s" +# pid_finder = "native" +# pid_tag = true +# name_override = "agent_telemetry" +# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] +# [inputs.procstat.tags] +# Computer = "$NODE_NAME" +# AgentVersion = "$AGENT_VERSION" +# ControllerType = "$CONTROLLER_TYPE" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" +# [[inputs.procstat]] +# #name_prefix="container.azm.ms/" +# exe = "ruby" +# interval = "10s" +# pid_finder = "native" +# pid_tag = true +# name_override = "agent_telemetry" +# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] +# [inputs.procstat.tags] +# Computer = "$NODE_NAME" +# AgentVersion = "$AGENT_VERSION" +# ControllerType = "$CONTROLLER_TYPE" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" +# [[inputs.procstat]] +# 
#name_prefix="container.azm.ms/" +# exe = "td-agent-bit" +# interval = "10s" +# pid_finder = "native" +# pid_tag = true +# name_override = "agent_telemetry" +# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] +# [inputs.procstat.tags] +# Computer = "$NODE_NAME" +# AgentVersion = "$AGENT_VERSION" +# ControllerType = "$CONTROLLER_TYPE" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" +# [[inputs.procstat]] +# #name_prefix="container.azm.ms/" +# exe = "telegraf" +# interval = "10s" +# pid_finder = "native" +# pid_tag = true +# name_override = "agent_telemetry" +# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] +# [inputs.procstat.tags] +# Computer = "$NODE_NAME" +# AgentVersion = "$AGENT_VERSION" +# ControllerType = "$CONTROLLER_TYPE" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" + +#kubelet-1 +# [[inputs.prometheus]] +# name_prefix="container.azm.ms/" +# ## An array of urls to scrape metrics from. +# urls = ["$CADVISOR_METRICS_URL"] +# fieldpass = ["$KUBELET_RUNTIME_OPERATIONS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC"] + +# metric_version = 2 +# url_tag = "scrapeUrl" + +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" + +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to `https` & most likely set the tls config. 
+# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# # monitor_kubernetes_pods = true + +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" +# ## OR +# # bearer_token_string = "abc_123" + +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# response_timeout = "15s" + +# ## Optional TLS Config +# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +# #tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = true +# #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] +# [inputs.prometheus.tagpass] +# operation_type = ["create_container", "remove_container", "pull_image"] + +# #kubelet-2 +# [[inputs.prometheus]] +# name_prefix="container.azm.ms/" +# ## An array of urls to scrape metrics from. +# urls = ["$CADVISOR_METRICS_URL"] + +# fieldpass = ["kubelet_running_pod_count","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] + +# metric_version = 2 +# url_tag = "scrapeUrl" + + +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# response_timeout = "15s" + +# ## Optional TLS Config +# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +# insecure_skip_verify = true + + +## prometheus custom metrics +# [[inputs.prometheus]] + +# interval = "$AZMON_DS_PROM_INTERVAL" + +# ## An array of urls to scrape metrics from. 
+# urls = $AZMON_DS_PROM_URLS + +# fieldpass = $AZMON_DS_PROM_FIELDPASS + +# fielddrop = $AZMON_DS_PROM_FIELDDROP + +# metric_version = 2 +# url_tag = "scrapeUrl" + +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" + +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" +# ## OR +# # bearer_token_string = "abc_123" + +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# response_timeout = "15s" + +# ## Optional TLS Config +# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +# #tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = true + #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] + +#Prometheus Custom Metrics +[[inputs.prometheus]] + interval = "1s" + + ## An array of urls to scrape metrics from. + # urls = $AZMON_RS_PROM_URLS + + # ## An array of Kubernetes services to scrape metrics from. + # kubernetes_services = $AZMON_RS_PROM_K8S_SERVICES + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
+ ## - prometheus.io/port: If port is not 9102 use this annotation + monitor_kubernetes_pods_version = 2 + # $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR + # $AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR + + # fieldpass = $AZMON_SIDECAR_PROM_FIELDPASS + # fielddrop = $AZMON_SIDECAR_PROM_FIELDDROP + monitor_kubernetes_pods = true + metric_version = 2 + url_tag = "scrapeUrl" + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## OR + # bearer_token_string = "abc_123" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + #tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + insecure_skip_verify = true + #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] + +# $AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER + +## OSM Prometheus configuration +# $AZMON_SIDECAR_OSM_PROM_PLUGINS + + +# ##npm +# [[inputs.prometheus]] +# #name_prefix="container.azm.ms/" +# ## An array of urls to scrape metrics from. +# urls = [] + +# #metric_version = 2 +# url_tag = "scrapeUrl" + +# ## An array of Kubernetes services to scrape metrics from. +# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + +# ## Kubernetes config file to create client from. +# # kube_config = "/path/to/kubernetes.config" + +# ## Scrape Kubernetes pods for the following prometheus annotations: +# ## - prometheus.io/scrape: Enable scraping for this pod +# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to +# ## set this to `https` & most likely set the tls config. 
+# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. +# ## - prometheus.io/port: If port is not 9102 use this annotation +# monitor_kubernetes_pods = true + +# ## Use bearer token for authorization. ('bearer_token' takes priority) +# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" +# ## OR +# # bearer_token_string = "abc_123" + +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# response_timeout = "15s" + +# ## Optional TLS Config +# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +# #tls_cert = /path/to/certfile +# # tls_key = /path/to/keyfile +# ## Use TLS but skip chain & host verification +# insecure_skip_verify = true +# #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] +# #[inputs.prometheus.tagpass] +# # operation_type = ["create_container", "remove_container", "pull_image"] + +# [[inputs.exec]] +# ## Commands array +# interval = "15m" +# commands = [ +# "/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh" +# ] + +# ## Timeout for each command to complete. +# timeout = "15s" + +# ## measurement name suffix (for separating different commands) +# name_suffix = "_telemetry" + +# ## Data format to consume. 
+# ## Each data format has its own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" +# tagexclude = ["hostName"] +# [inputs.exec.tags] +# AgentVersion = "$AGENT_VERSION" +# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" +# ACS_RESOURCE_NAME = "$TELEMETRY_ACS_RESOURCE_NAME" +# Region = "$TELEMETRY_AKS_REGION" +# ClusterName = "$TELEMETRY_CLUSTER_NAME" +# ClusterType = "$TELEMETRY_CLUSTER_TYPE" +# Computer = "placeholder_hostname" +# ControllerType = "$CONTROLLER_TYPE" \ No newline at end of file diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 722392157..6053251bd 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -299,26 +299,27 @@ function Generate-Certificates { C:\\opt\\omsagentwindows\\certgenerator\\certificategenerator.exe } -function Bootstrap-CACertificates { - try { - # This is required when the root CA certs are different for some clouds. - $caCerts=Invoke-WebRequest 'http://168.63.129.16/machine?comp=acmspackage&type=cacertificates&ext=json' -UseBasicParsing | ConvertFrom-Json - if (![string]::IsNullOrEmpty($caCerts)) { - $certificates = $caCerts.Certificates - for ($index = 0; $index -lt $certificates.Length ; $index++) { - $name=$certificates[$index].Name - $certificates[$index].CertBody > $name - Write-Host "name: $($name)" - Import-Certificate -FilePath .\$name -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose - } - } - } - catch { - $e = $_.Exception - Write-Host $e - Write-Host "exception occured in Bootstrap-CACertificates..." - } -} +#Commenting this out since wireserver access is no longer available +# function Bootstrap-CACertificates { +# try { +# # This is required when the root CA certs are different for some clouds. 
+# $caCerts=Invoke-WebRequest 'http://168.63.129.16/machine?comp=acmspackage&type=cacertificates&ext=json' -UseBasicParsing | ConvertFrom-Json +# if (![string]::IsNullOrEmpty($caCerts)) { +# $certificates = $caCerts.Certificates +# for ($index = 0; $index -lt $certificates.Length ; $index++) { +# $name=$certificates[$index].Name +# $certificates[$index].CertBody > $name +# Write-Host "name: $($name)" +# Import-Certificate -FilePath .\$name -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose +# } +# } +# } +# catch { +# $e = $_.Exception +# Write-Host $e +# Write-Host "exception occured in Bootstrap-CACertificates..." +# } +# } function Test-CertificatePath { $certLocation = $env:CI_CERT_LOCATION @@ -346,12 +347,12 @@ Remove-WindowsServiceIfItExists "fluentdwinaks" Set-EnvironmentVariables Start-FileSystemWatcher -#Bootstrapping CA certs for non public clouds and AKS clusters -$aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID") -if (![string]::IsNullOrEmpty($aksResourceId) -and $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) -{ - Bootstrap-CACertificates -} +#Bootstrapping CA certs for non public clouds and AKS clusters -Commenting this out since wireserver access is no longer available +# $aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID") +# if (![string]::IsNullOrEmpty($aksResourceId) -and $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) +# { +# Bootstrap-CACertificates +# } Generate-Certificates Test-CertificatePath diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index dd6d52a11..cfc30e7f9 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -32,6 +32,22 @@ Write-Host ('Installing Fluent Bit'); } Write-Host ('Finished Installing Fluentbit') +Write-Host ('Installing Telegraf'); + + try { + 
$telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-win/telegraf-win.zip' + Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip + Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win + Move-Item -Path /installation/telegraf-win/*/* -Destination /opt/telegraf-win/ -ErrorAction SilentlyContinue + } + catch { + $ex = $_.Exception + Write-Host "exception while downloading telegraf for windows" + Write-Host $ex + exit 1 + } +Write-Host ('Finished downloading Telegraf') + Write-Host ('Installing Visual C++ Redistributable Package') $vcRedistLocation = 'https://aka.ms/vs/16/release/vc_redist.x64.exe' From 64deacf7681c42961429507c6984ddfad91a0157 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 23 Feb 2021 11:49:31 -0800 Subject: [PATCH 117/175] creating folder structure --- kubernetes/windows/Dockerfile | 3 +++ kubernetes/windows/setup.ps1 | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index f852bd236..ac7094212 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -62,6 +62,9 @@ COPY ./omsagentwindows/installer/conf/fluent-docker-parser.conf /etc/fluent/ COPY ./omsagentwindows/installer/conf/fluent-bit.conf /etc/fluent-bit COPY ./omsagentwindows/installer/conf/out_oms.conf /etc/omsagentwindows +# copy telegraf conf file +COPY ./omsagentwindows/installer/conf/telegraf.conf /etc/telegraf/ + # copy keepcert alive ruby scripts COPY ./omsagentwindows/installer/scripts/rubyKeepCertificateAlive/*.rb /etc/fluent/plugin/ diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index cfc30e7f9..2272295b7 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -8,10 +8,12 @@ Write-Host ('Creating folder structure') New-Item -Type Directory -Path /opt/fluent-bit New-Item -Type Directory -Path /opt/scripts/ruby + 
New-Item -Type Directory -Path /opt/telegraf New-Item -Type Directory -Path /etc/fluent-bit New-Item -Type Directory -Path /etc/fluent New-Item -Type Directory -Path /etc/omsagentwindows + New-Item -Type Directory -Path /etc/telegraf New-Item -Type Directory -Path /etc/config/settings/ New-Item -Type Directory -Path /etc/config/adx/ @@ -38,7 +40,7 @@ Write-Host ('Installing Telegraf'); $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-win/telegraf-win.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win - Move-Item -Path /installation/telegraf-win/*/* -Destination /opt/telegraf-win/ -ErrorAction SilentlyContinue + Move-Item -Path /installation/telegraf-win/*/* -Destination /opt/telegraf/ -ErrorAction SilentlyContinue } catch { $ex = $_.Exception From 80aeb440099f464496b3876ab58379f1d29f1155 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 23 Feb 2021 14:53:41 -0800 Subject: [PATCH 118/175] updating path --- kubernetes/windows/setup.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 2272295b7..a51f04e91 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -40,7 +40,7 @@ Write-Host ('Installing Telegraf'); $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-win/telegraf-win.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win - Move-Item -Path /installation/telegraf-win/*/* -Destination /opt/telegraf/ -ErrorAction SilentlyContinue + Move-Item -Path /installation/telegraf-win -Destination /opt/telegraf/ -ErrorAction SilentlyContinue } catch { $ex = $_.Exception From 8bc05c968a8d42f07a33f134decaaa03fc62813d Mon Sep 17 
00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 23 Feb 2021 15:38:34 -0800 Subject: [PATCH 119/175] Running telegraf service --- build/windows/installer/conf/fluent-bit.conf | 9 +++++++++ kubernetes/windows/main.ps1 | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/build/windows/installer/conf/fluent-bit.conf b/build/windows/installer/conf/fluent-bit.conf index 879ee4810..1eebe5fd6 100644 --- a/build/windows/installer/conf/fluent-bit.conf +++ b/build/windows/installer/conf/fluent-bit.conf @@ -12,6 +12,15 @@ Chunk_Size 32 Buffer_Size 64 +[INPUT] + Name tcp + Tag oms.container.perf.telegraf.* + Listen 0.0.0.0 + Port 25229 + Chunk_Size 32 + Buffer_Size 64 + Mem_Buf_Limit 5m + [OUTPUT] Name oms EnableTelemetry true diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 6053251bd..63215a205 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -294,6 +294,15 @@ function Start-Fluent { Notepad.exe | Out-Null } +function Start-Telegraf { + Write-Host "Installing telegraf service" + C:\opt\telegraf\telegraf-win\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" + Write-Host "Running telegraf service in test mode" + C:\opt\telegraf\telegraf-win\telegraf.exe --config "C:\etc\telegraf\telegraf.conf" --test + Write-Host "Starting telegraf service" + C:\opt\telegraf\telegraf-win\telegraf.exe --service start +} + function Generate-Certificates { Write-Host "Generating Certificates" C:\\opt\\omsagentwindows\\certgenerator\\certificategenerator.exe @@ -357,6 +366,7 @@ Start-FileSystemWatcher Generate-Certificates Test-CertificatePath Start-Fluent +Start-Telegraf # List all powershell processes running. 
This should have main.ps1 and filesystemwatcher.ps1 Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | Format-Table -Property Name, CommandLine, ProcessId From 95af4d41d1c43d68f3d09b2d7d014c45634ca3ac Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 23 Feb 2021 18:04:36 -0800 Subject: [PATCH 120/175] changes --- kubernetes/windows/main.ps1 | 2 +- kubernetes/windows/setup.ps1 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 63215a205..fcfa2fbcc 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -365,8 +365,8 @@ Start-FileSystemWatcher Generate-Certificates Test-CertificatePath -Start-Fluent Start-Telegraf +Start-Fluent # List all powershell processes running. This should have main.ps1 and filesystemwatcher.ps1 Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | Format-Table -Property Name, CommandLine, ProcessId diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index a51f04e91..337e81195 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -37,7 +37,7 @@ Write-Host ('Finished Installing Fluentbit') Write-Host ('Installing Telegraf'); try { - $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-win/telegraf-win.zip' + $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-win/telegraf-win-debug.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win Move-Item -Path /installation/telegraf-win -Destination /opt/telegraf/ -ErrorAction SilentlyContinue From 30259b9fc87a8aec53d4458e5306456aa80d8c22 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 23 Feb 2021 19:03:40 -0800 Subject: [PATCH 121/175] setting env vars --- 
kubernetes/windows/main.ps1 | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index fcfa2fbcc..50ce8dcff 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -295,6 +295,21 @@ function Start-Fluent { } function Start-Telegraf { + Write-Host "Setting required environment variables for telegraf prometheus input plugin to run properly..." + $kubernetesServiceHost = [System.Environment]::GetEnvironmentVariable("KUBERNETES_SERVICE_HOST", "process") + [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_HOST", $kubernetesServiceHost, 'machine') + Write-Host "Successfully set environment variable KUBERNETES_SERVICE_HOST - $($kubernetesServiceHost) for target 'machine'..." + + $kubernetesServicePort = [System.Environment]::GetEnvironmentVariable("KUBERNETES_SERVICE_PORT", "process") + [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_PORT", $kubernetesServicePort, 'machine') + Write-Host "Successfully set environment variable KUBERNETES_SERVICE_PORT - $($kubernetesServicePort) for target 'machine'..." + + + $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") + [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, 'machine') + Write-Host "Successfully set environment variable NODE_IP - $($nodeIp) for target 'machine'..." 
+ + Write-Host "Installing telegraf service" C:\opt\telegraf\telegraf-win\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" Write-Host "Running telegraf service in test mode" From d619bb67e576d75aa0e4efea2e7fa7bcf4e14a59 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 23 Feb 2021 19:39:21 -0800 Subject: [PATCH 122/175] fixing pre req script --- scripts/build/windows/install-build-pre-requisites.ps1 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 b/scripts/build/windows/install-build-pre-requisites.ps1 index b5e6e2d18..3bb56ac2a 100755 --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -21,7 +21,7 @@ function Install-Go { # install go lang Write-Host("installing go ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ' + $output + '/quiet' + Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' Write-Host("installing go completed") Write-Host "updating PATH variable" @@ -102,7 +102,7 @@ function Install-DotNetCoreSDK() { # install dotNet core sdk Write-Host("installing .net core sdk 3.1 ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ' + $output + '/quiet' + Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' Write-Host("installing .net core sdk 3.1 completed") } @@ -129,7 +129,7 @@ function Install-Docker() { # install docker Write-Host("installing docker for desktop ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ' + $output + '/quiet' + Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' Write-Host("installing docker for desktop completed") } From 84365188550f712e8789c7d4614b3451130f0609 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 24 Feb 2021 12:07:38 -0800 Subject: [PATCH 123/175] adding null checks --- kubernetes/windows/main.ps1 | 31 
++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 50ce8dcff..47acd08a1 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -297,18 +297,31 @@ function Start-Fluent { function Start-Telegraf { Write-Host "Setting required environment variables for telegraf prometheus input plugin to run properly..." $kubernetesServiceHost = [System.Environment]::GetEnvironmentVariable("KUBERNETES_SERVICE_HOST", "process") - [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_HOST", $kubernetesServiceHost, 'machine') - Write-Host "Successfully set environment variable KUBERNETES_SERVICE_HOST - $($kubernetesServiceHost) for target 'machine'..." + if (![string]::IsNullOrEmpty($kubernetesServiceHost)) { + [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_HOST", $kubernetesServiceHost, 'machine') + Write-Host "Successfully set environment variable KUBERNETES_SERVICE_HOST - $($kubernetesServiceHost) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable KUBERNETES_SERVICE_HOST for target 'machine' since it is either null or empty" + } $kubernetesServicePort = [System.Environment]::GetEnvironmentVariable("KUBERNETES_SERVICE_PORT", "process") - [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_PORT", $kubernetesServicePort, 'machine') - Write-Host "Successfully set environment variable KUBERNETES_SERVICE_PORT - $($kubernetesServicePort) for target 'machine'..." - - + if (![string]::IsNullOrEmpty($kubernetesServicePort)) { + [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_PORT", $kubernetesServicePort, 'machine') + Write-Host "Successfully set environment variable KUBERNETES_SERVICE_PORT - $($kubernetesServicePort) for target 'machine'..." 
+ } + else { + Write-Host "Failed to set environment variable KUBERNETES_SERVICE_PORT for target 'machine' since it is either null or empty" + } + $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") - [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, 'machine') - Write-Host "Successfully set environment variable NODE_IP - $($nodeIp) for target 'machine'..." - + if (![string]::IsNullOrEmpty($nodeIp)) { + [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, 'machine') + Write-Host "Successfully set environment variable NODE_IP - $($nodeIp) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable NODE_IP for target 'machine' since it is either null or empty" + } Write-Host "Installing telegraf service" C:\opt\telegraf\telegraf-win\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" From 44824857146216295bd381d4e266b26ac6a56817 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 24 Feb 2021 18:07:11 -0800 Subject: [PATCH 124/175] configparser changes --- .../scripts/tomlparser-prom-customconfig.rb | 382 ++++++++++++++++++ .../installer/datafiles/base_container.data | 2 +- .../scripts/tomlparser-prom-customconfig.rb | 51 ++- build/windows/installer/conf/telegraf.conf | 19 +- kubernetes/omsagent.yaml | 2 + kubernetes/windows/main.ps1 | 17 +- .../setdefaulttelegrafenvvariables.ps1 | 15 + 7 files changed, 451 insertions(+), 37 deletions(-) create mode 100644 build/common/installer/scripts/tomlparser-prom-customconfig.rb create mode 100644 kubernetes/windows/setdefaulttelegrafenvvariables.ps1 diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb new file mode 100644 index 000000000..8407f516e --- /dev/null +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -0,0 +1,382 @@ +#!/usr/local/bin/ruby + +require_relative "tomlrb" +require_relative 
"ConfigParseErrorLogger" +require "fileutils" + +@promConfigMapMountPath = "/etc/config/settings/prometheus-data-collection-settings" +@replicaset = "replicaset" +@daemonset = "daemonset" +@promSideCar = "prometheus-sidecar" +@windows = "windows" +@configSchemaVersion = "" +@defaultDsInterval = "1m" +@defaultDsPromUrls = [] +@defaultDsFieldPass = [] +@defaultDsFieldDrop = [] +@defaultRsInterval = "1m" +@defaultRsPromUrls = [] +@defaultRsFieldPass = [] +@defaultRsFieldDrop = [] +@defaultRsK8sServices = [] +# @defaultRsMonitorPods = false +@defaultCustomPrometheusInterval = "1m" +@defaultCustomPrometheusFieldPass = [] +@defaultCustomPrometheusFieldDrop = [] +@defaultCustomPrometheusMonitorPods = false +@defaultCustomPrometheusLabelSelectors = "" +@defaultCustomPrometheusFieldSelectors = "" + +#Configurations to be used for the auto-generated input prometheus plugins for namespace filtering +@metricVersion = 2 +@monitorKubernetesPodsVersion = 2 +@urlTag = "scrapeUrl" +@bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" +@responseTimeout = "15s" +@tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +@insecureSkipVerify = true + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@promConfigMapMountPath)) + puts "config::configmap container-azm-ms-agentconfig for settings mounted, parsing values for prometheus config map" + parsedConfig = Tomlrb.load_file(@promConfigMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted prometheus config map" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for settings not mounted, using defaults for prometheus scraping" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for prometheus config: #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +def 
checkForTypeArray(arrayValue, arrayType) + if (arrayValue.nil? || (arrayValue.kind_of?(Array) && ((arrayValue.length == 0) || (arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType))))) + return true + else + return false + end +end + +def checkForType(variable, varType) + if variable.nil? || variable.kind_of?(varType) + return true + else + return false + end +end + +def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + begin + puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with no namespace filters" + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = \"#{kubernetesLabelSelectors}\"")) + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = \"#{kubernetesFieldSelectors}\"")) + rescue => errorStr + puts "Exception while replacing default pod monitor settings for sidecar: #{errorStr}" + end + return new_contents +end + +def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) + begin + puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with namespace filters" + + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_MONITOR_PODS") + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per 
namespace\n # $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR") + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR") + + pluginConfigsWithNamespaces = "" + monitorKubernetesPodsNamespaces.each do |namespace| + if !namespace.nil? + #Stripping namespaces to remove leading and trailing whitespaces + namespace.strip! + if namespace.length > 0 + pluginConfigsWithNamespaces += "\n[[inputs.prometheus]] + interval = \"#{interval}\" + monitor_kubernetes_pods = true + monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} + monitor_kubernetes_pods_namespace = \"#{namespace}\" + kubernetes_label_selector = \"#{kubernetesLabelSelectors}\" + kubernetes_field_selector = \"#{kubernetesFieldSelectors}\" + fieldpass = #{fieldPassSetting} + fielddrop = #{fieldDropSetting} + metric_version = #{@metricVersion} + url_tag = \"#{@urlTag}\" + bearer_token = \"#{@bearerToken}\" + response_timeout = \"#{@responseTimeout}\" + tls_ca = \"#{@tlsCa}\" + insecure_skip_verify = #{@insecureSkipVerify}\n" + end + end + end + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces) + return new_contents + rescue => errorStr + puts "Exception while creating prometheus input plugins to filter namespaces in sidecar: #{errorStr}, using defaults" + replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + # Checking to see if this is the daemonset or replicaset to parse config accordingly + controller = ENV["CONTROLLER_TYPE"] + containerType = ENV["CONTAINER_TYPE"] + containerOs = ENV["CONTAINER_OS"] + if !controller.nil? 
+ if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? + if controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? + #Get prometheus replicaset custom config settings + begin + interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] + urls = parsedConfig[:prometheus_data_collection_settings][:cluster][:urls] + kubernetesServices = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_services] + # monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] + # monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + + # Check for the right datattypes to enforce right setting values + if checkForType(interval, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + checkForTypeArray(kubernetesServices, String) && + checkForTypeArray(urls, String) + # (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) # Checking for Boolean type, since 'Boolean' is not defined as a type in ruby + puts "config::Successfully passed typecheck for config settings for replicaset" + #if setting is nil assign default values + interval = (interval.nil?) ? @defaultRsInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultRsFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? @defaultRsFieldDrop : fieldDrop + kubernetesServices = (kubernetesServices.nil?) ? @defaultRsK8sServices : kubernetesServices + urls = (urls.nil?) ? @defaultRsPromUrls : urls + # monitorKubernetesPods = (monitorKubernetesPods.nil?) ? 
@defaultRsMonitorPods : monitorKubernetesPods + + file_name = "/opt/telegraf-test-rs.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf", file_name) + + puts "config::Starting to substitute the placeholders in telegraf conf copy file for replicaset" + #Replace the placeholder config values with values from custom config + text = File.read(file_name) + new_contents = text.gsub("$AZMON_RS_PROM_INTERVAL", interval) + fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDPASS", fieldPassSetting) + fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDDROP", fieldDropSetting) + new_contents = new_contents.gsub("$AZMON_RS_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_RS_PROM_K8S_SERVICES", ((kubernetesServices.length > 0) ? ("[\"" + kubernetesServices.join("\",\"") + "\"]") : "[]")) + + # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces + # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - + # - to use defaults in case of nil settings + # if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? 
&& checkForTypeArray(monitorKubernetesPodsNamespaces, String) + # new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) + # monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + # else + # new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) + # monitorKubernetesPodsNamespacesLength = 0 + # end + + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully substituted the placeholders in telegraf conf file for replicaset" + #Set environment variables for telemetry + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil? + file.write("export TELEMETRY_RS_PROM_INTERVAL=\"#{interval}\"\n") + #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_RS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") + file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") + file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") + file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") + # file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + # file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for replicaset" + end + else + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for replicaset, using defaults, please use right types for all settings") + end # end of type check condition + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for replicaset: #{errorStr}, using defaults") + setRsPromDefaults + puts "****************End Prometheus Config 
Processing********************" + end + elsif controller.casecmp(@daemonset) == 0 && + ((!containerType.nil? && containerType.casecmp(@promSideCar) == 0) || ((!containerOs.nil? && containerOs.casecmp(@windows) == 0))) && + !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? + #Get prometheus sidecar custom config settings for monitor kubernetes pods + begin + interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] + monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] + monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + kubernetesLabelSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_label_selector] + kubernetesFieldSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_field_selector] + + # Check for the right datattypes to enforce right setting values + if checkForType(interval, String) && + checkForType(kubernetesLabelSelectors, String) && + checkForType(kubernetesFieldSelectors, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby + puts "config::Successfully passed typecheck for config settings for custom prometheus scraping" + #if setting is nil assign default values + interval = (interval.nil?) ? @defaultCustomPrometheusInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultCustomPrometheusFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? 
@defaultCustomPrometheusFieldDrop : fieldDrop + monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultCustomPrometheusMonitorPods : monitorKubernetesPods + kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? @defaultCustomPrometheusLabelSelectors : kubernetesLabelSelectors + kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? @defaultCustomPrometheusFieldSelectors : kubernetesFieldSelectors + + if (!containerOs.nil? && containerOs.casecmp(@windows) == 0) + file_name = "/etc/telegraf/telegraf.conf" + else + file_name = "/opt/telegraf-test-prom-side-car.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf", file_name) + end + puts "config::Starting to substitute the placeholders in telegraf conf copy file for linux or conf file for windows for custom prometheus scraping" + #Replace the placeholder config values with values from custom config + text = File.read(file_name) + new_contents = text.gsub("$AZMON_SIDECAR_PROM_INTERVAL", interval) + fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDPASS", fieldPassSetting) + fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" + #new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDDROP", fieldDropSetting) + + # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces + # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - + # - to use defaults in case of nil settings + monitorKubernetesPodsNSConfig = [] + if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? 
&& checkForTypeArray(monitorKubernetesPodsNamespaces, String) + # Adding a check to see if an empty array is passed for kubernetes namespaces + if (monitorKubernetesPodsNamespaces.length > 0) + new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + monitorKubernetesPodsNSConfig = monitorKubernetesPodsNamespaces + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + + # Label and field selectors are passed as strings. For field selectors, split by commas to get the number of key-value pairs. + # Label selectors can be formatted as "app in (app1, app2, app3)", so split by commas only outside parentheses to get the number of key-value pairs. + kubernetesLabelSelectorsLength = kubernetesLabelSelectors.split(/,\s*(?=[^()]*(?:\(|$))/).length + kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(",").length + + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully substituted the placeholders in telegraf conf file for custom prometheus scraping" + #Set environment variables for telemetry + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil? 
+ #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") + file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") + + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for prometheus sidecar" + end + else + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for prometheus side car, using defaults, please use right types for all settings") + end # end of type check condition + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for promethues side car: #{errorStr}, using defaults") + # look into this + #setRsPromDefaults + puts "****************End Prometheus Config Processing********************" + end + elsif controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? 
+ #Get prometheus daemonset custom config settings + begin + interval = parsedConfig[:prometheus_data_collection_settings][:node][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:node][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:node][:fielddrop] + urls = parsedConfig[:prometheus_data_collection_settings][:node][:urls] + + # Check for the right datattypes to enforce right setting values + if checkForType(interval, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + checkForTypeArray(urls, String) + puts "config::Successfully passed typecheck for config settings for daemonset" + + #if setting is nil assign default values + interval = (interval.nil?) ? @defaultDsInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultDsFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? @defaultDsFieldDrop : fieldDrop + urls = (urls.nil?) ? @defaultDsPromUrls : urls + + file_name = "/opt/telegraf-test.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf.conf", file_name) + + puts "config::Starting to substitute the placeholders in telegraf conf copy file for daemonset" + #Replace the placeholder config values with values from custom config + text = File.read(file_name) + new_contents = text.gsub("$AZMON_DS_PROM_INTERVAL", interval) + new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDPASS", ((fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDDROP", ((fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_DS_PROM_URLS", ((urls.length > 0) ? 
("[\"" + urls.join("\",\"") + "\"]") : "[]")) + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully substituted the placeholders in telegraf conf file for daemonset" + + #Set environment variables for telemetry + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil? + file.write("export TELEMETRY_DS_PROM_INTERVAL=\"#{interval}\"\n") + #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_DS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") + file.write("export TELEMETRY_DS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") + file.write("export TELEMETRY_DS_PROM_URLS_LENGTH=#{urls.length}\n") + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for daemonset" + end + else + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for daemonset, using defaults, please use right types for all settings") + end # end of type check condition + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for daemonset: #{errorStr}, using defaults, please check correctness of configmap") + puts "****************End Prometheus Config Processing********************" + end + end # end of controller type check + end + else + ConfigParseErrorLogger.logError("Controller undefined while processing prometheus config, using defaults") + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Prometheus Config Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? 
+ populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@promConfigMapMountPath)) + ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported version") + else + puts "config::No configmap mounted for prometheus custom config, using defaults" + end +end +puts "****************End Prometheus Config Processing********************" diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index cae9ab21d..df8fbc3da 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -122,7 +122,7 @@ MAINTAINER: 'Microsoft Corporation' /etc/opt/microsoft/docker-cimprov/telegraf-rs.conf; build/linux/installer/conf/telegraf-rs.conf; 644; root; root /opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; build/linux/installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root /opt/livenessprobe.sh; build/linux/installer/scripts/livenessprobe.sh; 755; root; root -/opt/tomlparser-prom-customconfig.rb; build/linux/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root +/opt/tomlparser-prom-customconfig.rb; build/common/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root /opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root /opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb index 0666b1300..8407f516e 100644 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb @@ -8,6 +8,7 @@ @replicaset = "replicaset" @daemonset = "daemonset" @promSideCar = 
"prometheus-sidecar" +@windows = "windows" @configSchemaVersion = "" @defaultDsInterval = "1m" @defaultDsPromUrls = [] @@ -19,12 +20,12 @@ @defaultRsFieldDrop = [] @defaultRsK8sServices = [] # @defaultRsMonitorPods = false -@defaultSidecarInterval = "1m" -@defaultSidecarFieldPass = [] -@defaultSidecarFieldDrop = [] -@defaultSidecarMonitorPods = false -@defaultSidecarLabelSelectors = "" -@defaultSidecarFieldSelectors = "" +@defaultCustomPrometheusInterval = "1m" +@defaultCustomPrometheusFieldPass = [] +@defaultCustomPrometheusFieldDrop = [] +@defaultCustomPrometheusMonitorPods = false +@defaultCustomPrometheusLabelSelectors = "" +@defaultCustomPrometheusFieldSelectors = "" #Configurations to be used for the auto-generated input prometheus plugins for namespace filtering @metricVersion = 2 @@ -128,6 +129,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Checking to see if this is the daemonset or replicaset to parse config accordingly controller = ENV["CONTROLLER_TYPE"] containerType = ENV["CONTAINER_TYPE"] + containerOs = ENV["CONTAINER_OS"] if !controller.nil? if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? if controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? @@ -209,7 +211,9 @@ def populateSettingValuesFromConfigMap(parsedConfig) setRsPromDefaults puts "****************End Prometheus Config Processing********************" end - elsif controller.casecmp(@daemonset) == 0 && !containerType.nil? && containerType.casecmp(@promSideCar) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? + elsif controller.casecmp(@daemonset) == 0 && + ((!containerType.nil? && containerType.casecmp(@promSideCar) == 0) || ((!containerOs.nil? && containerOs.casecmp(@windows) == 0))) && + !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
#Get prometheus sidecar custom config settings for monitor kubernetes pods begin interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] @@ -227,20 +231,23 @@ def populateSettingValuesFromConfigMap(parsedConfig) checkForTypeArray(fieldPass, String) && checkForTypeArray(fieldDrop, String) && (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby - puts "config::Successfully passed typecheck for config settings for prometheus side car" + puts "config::Successfully passed typecheck for config settings for custom prometheus scraping" #if setting is nil assign default values - interval = (interval.nil?) ? @defaultSidecarInterval : interval - fieldPass = (fieldPass.nil?) ? @defaultSidecarFieldPass : fieldPass - fieldDrop = (fieldDrop.nil?) ? @defaultSidecarFieldDrop : fieldDrop - monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultSidecarMonitorPods : monitorKubernetesPods - kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? @defaultSidecarLabelSelectors : kubernetesLabelSelectors - kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? @defaultSidecarFieldSelectors : kubernetesFieldSelectors + interval = (interval.nil?) ? @defaultCustomPrometheusInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultCustomPrometheusFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? @defaultCustomPrometheusFieldDrop : fieldDrop + monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultCustomPrometheusMonitorPods : monitorKubernetesPods + kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? @defaultCustomPrometheusLabelSelectors : kubernetesLabelSelectors + kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? 
@defaultCustomPrometheusFieldSelectors : kubernetesFieldSelectors - file_name = "/opt/telegraf-test-prom-side-car.conf" - # Copy the telegraf config file to a temp file to run telegraf in test mode with this config - FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf", file_name) - - puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car" + if (!containerOs.nil? && containerOs.casecmp(@windows) == 0) + file_name = "/etc/telegraf/telegraf.conf" + else + file_name = "/opt/telegraf-test-prom-side-car.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf", file_name) + end + puts "config::Starting to substitute the placeholders in telegraf conf copy file for linux or conf file for windows for custom prometheus scraping" #Replace the placeholder config values with values from custom config text = File.read(file_name) new_contents = text.gsub("$AZMON_SIDECAR_PROM_INTERVAL", interval) @@ -267,14 +274,14 @@ def populateSettingValuesFromConfigMap(parsedConfig) new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) monitorKubernetesPodsNamespacesLength = 0 end - + # Label and field selectors are passed as strings. For field selectors, split by commas to get the number of key-value pairs. # Label selectors can be formatted as "app in (app1, app2, app3)", so split by commas only outside parentheses to get the number of key-value pairs. 
kubernetesLabelSelectorsLength = kubernetesLabelSelectors.split(/,\s*(?=[^()]*(?:\(|$))/).length - kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(',').length + kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(",").length File.open(file_name, "w") { |file| file.puts new_contents } - puts "config::Successfully substituted the placeholders in telegraf conf file for prometheus side car" + puts "config::Successfully substituted the placeholders in telegraf conf file for custom prometheus scraping" #Set environment variables for telemetry file = File.open("telemetry_prom_config_env_var", "w") if !file.nil? diff --git a/build/windows/installer/conf/telegraf.conf b/build/windows/installer/conf/telegraf.conf index 809f3538e..345394ae4 100644 --- a/build/windows/installer/conf/telegraf.conf +++ b/build/windows/installer/conf/telegraf.conf @@ -727,7 +727,7 @@ #Prometheus Custom Metrics [[inputs.prometheus]] - interval = "1s" + interval = "$AZMON_TELEGRAF_PROM_INTERVAL" ## An array of urls to scrape metrics from. # urls = $AZMON_RS_PROM_URLS @@ -741,13 +741,14 @@ ## set this to `https` & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation + $AZMON_TELEGRAF_PROM_MONITOR_PODS monitor_kubernetes_pods_version = 2 - # $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR - # $AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR + $AZMON_TELEGRAF_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_TELEGRAF_PROM_KUBERNETES_FIELD_SELECTOR + + fieldpass = $AZMON_TELEGRAF_PROM_FIELDPASS + fielddrop = $AZMON_TELEGRAF_PROM_FIELDDROP - # fieldpass = $AZMON_SIDECAR_PROM_FIELDPASS - # fielddrop = $AZMON_SIDECAR_PROM_FIELDDROP - monitor_kubernetes_pods = true metric_version = 2 url_tag = "scrapeUrl" ## Kubernetes config file to create client from. 
@@ -769,11 +770,7 @@ insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -# $AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER - -## OSM Prometheus configuration -# $AZMON_SIDECAR_OSM_PROM_PLUGINS - +$AZMON_TELEGRAF_PROM_PLUGINS_WITH_NAMESPACE_FILTER # ##npm # [[inputs.prometheus]] diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 3c9fdf911..a026ad2a6 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -783,6 +783,8 @@ spec: # value: "my_acs_cluster_name" - name: CONTROLLER_TYPE value: "DaemonSet" + - name: CONTAINER_OS + value: "Windows" - name: HOSTNAME valueFrom: fieldRef: diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 47acd08a1..96a5d9bac 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -295,10 +295,21 @@ function Start-Fluent { } function Start-Telegraf { + # Set default telegraf environment variables for prometheus scraping + Write-Host "**********Setting default environment variables for telegraf prometheus plugin..." + .\setdefaulttelegrafenvvariables.ps1 + + # run prometheus custom config parser + Write-Host "**********Running config parser for custom prometheus scraping**********" + ruby /opt/omsagentwindows/scripts/ruby/tomlparser-prom-customconfig.rb + Write-Host "**********End running config parser for custom prometheus scraping**********" + + + # Set required environment variable for telegraf prometheus plugin to run properly Write-Host "Setting required environment variables for telegraf prometheus input plugin to run properly..." 
$kubernetesServiceHost = [System.Environment]::GetEnvironmentVariable("KUBERNETES_SERVICE_HOST", "process") if (![string]::IsNullOrEmpty($kubernetesServiceHost)) { - [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_HOST", $kubernetesServiceHost, 'machine') + [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_HOST", $kubernetesServiceHost, "machine") Write-Host "Successfully set environment variable KUBERNETES_SERVICE_HOST - $($kubernetesServiceHost) for target 'machine'..." } else { @@ -307,7 +318,7 @@ function Start-Telegraf { $kubernetesServicePort = [System.Environment]::GetEnvironmentVariable("KUBERNETES_SERVICE_PORT", "process") if (![string]::IsNullOrEmpty($kubernetesServicePort)) { - [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_PORT", $kubernetesServicePort, 'machine') + [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_PORT", $kubernetesServicePort, "machine") Write-Host "Successfully set environment variable KUBERNETES_SERVICE_PORT - $($kubernetesServicePort) for target 'machine'..." } else { @@ -316,7 +327,7 @@ function Start-Telegraf { $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") if (![string]::IsNullOrEmpty($nodeIp)) { - [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, 'machine') + [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, "machine") Write-Host "Successfully set environment variable NODE_IP - $($nodeIp) for target 'machine'..." 
} else { diff --git a/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 new file mode 100644 index 000000000..4e570c44c --- /dev/null +++ b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 @@ -0,0 +1,15 @@ +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_INTERVAL", "1m", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_INTERVAL", "1m", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_FIELDPASS", "[]", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_FIELDPASS", "[]", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_FIELDDROP", "[]", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_FIELDDROP", "[]", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_PLUGINS_WITH_NAMESPACE_FILTER", " ", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_PLUGINS_WITH_NAMESPACE_FILTER", " ", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_KUBERNETES_LABEL_SELECTOR", "kubernetes_label_selector = ''", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_KUBERNETES_LABEL_SELECTOR", "kubernetes_label_selector = ''", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_KUBERNETES_FIELD_SELECTOR", "kubernetes_field_selector = ''", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_KUBERNETES_FIELD_SELECTOR", "kubernetes_field_selector = ''", "machine") + From 46b5ef59503a1760242d291599dac4027735fe2f Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar 
Date: Wed, 24 Feb 2021 18:29:53 -0800 Subject: [PATCH 125/175] refactor changes --- .../scripts/tomlparser-prom-customconfig.rb | 52 +-- .../conf/telegraf-prom-side-car.conf | 16 +- .../scripts/tomlparser-osm-config.rb | 8 +- .../scripts/tomlparser-prom-customconfig.rb | 382 ------------------ build/windows/installer/conf/telegraf.conf | 22 +- .../linux/defaultpromenvvariables-sidecar | 16 +- .../setdefaulttelegrafenvvariables.ps1 | 28 +- source/plugins/go/src/telemetry.go | 30 +- 8 files changed, 84 insertions(+), 470 deletions(-) delete mode 100644 build/linux/installer/scripts/tomlparser-prom-customconfig.rb diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 8407f516e..fe360e296 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -74,12 +74,12 @@ def checkForType(variable, varType) def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) begin puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with no namespace filters" - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = \"#{kubernetesLabelSelectors}\"")) - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = \"#{kubernetesFieldSelectors}\"")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", 
"") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = \"#{kubernetesLabelSelectors}\"")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = \"#{kubernetesFieldSelectors}\"")) rescue => errorStr - puts "Exception while replacing default pod monitor settings for sidecar: #{errorStr}" + puts "Exception while replacing default pod monitor settings for custom prometheus scraping: #{errorStr}" end return new_contents end @@ -88,9 +88,9 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu begin puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with namespace filters" - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_MONITOR_PODS") - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR") - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # 
$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR") pluginConfigsWithNamespaces = "" monitorKubernetesPodsNamespaces.each do |namespace| @@ -116,10 +116,10 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu end end end - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces) return new_contents rescue => errorStr - puts "Exception while creating prometheus input plugins to filter namespaces in sidecar: #{errorStr}, using defaults" + puts "Exception while creating prometheus input plugins to filter namespaces for custom prometheus: #{errorStr}, using defaults" replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) end end @@ -214,7 +214,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) elsif controller.casecmp(@daemonset) == 0 && ((!containerType.nil? && containerType.casecmp(@promSideCar) == 0) || ((!containerOs.nil? && containerOs.casecmp(@windows) == 0))) && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
- #Get prometheus sidecar custom config settings for monitor kubernetes pods + #Get prometheus custom config settings for monitor kubernetes pods begin interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] @@ -250,11 +250,11 @@ def populateSettingValuesFromConfigMap(parsedConfig) puts "config::Starting to substitute the placeholders in telegraf conf copy file for linux or conf file for windows for custom prometheus scraping" #Replace the placeholder config values with values from custom config text = File.read(file_name) - new_contents = text.gsub("$AZMON_SIDECAR_PROM_INTERVAL", interval) + new_contents = text.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", interval) fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDPASS", fieldPassSetting) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", fieldPassSetting) fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" - #new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDDROP", fieldDropSetting) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", fieldDropSetting) # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - @@ -282,18 +282,20 @@ def populateSettingValuesFromConfigMap(parsedConfig) File.open(file_name, "w") { |file| file.puts new_contents } puts "config::Successfully substituted the placeholders in telegraf conf file for custom prometheus scraping" - #Set environment variables for telemetry - file = File.open("telemetry_prom_config_env_var", "w") - if !file.nil? 
- #Setting array lengths as environment variables for telemetry purposes - file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") - file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") - file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") - file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") + #Set environment variables for telemetry in the sidecar container + if (!containerType.nil? && containerType.casecmp(@promSideCar) == 0) + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil? + #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_CUSTOM_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") - # Close file after writing all environment variables - file.close - puts "config::Successfully created telemetry file for prometheus sidecar" + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for prometheus sidecar" + end end else ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for prometheus side car, using defaults, please use right types for all settings") diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index d35dd4b6d..585b7b423 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -727,7 
+727,7 @@ #Prometheus Custom Metrics [[inputs.prometheus]] - interval = "$AZMON_SIDECAR_PROM_INTERVAL" + interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" ## An array of urls to scrape metrics from. # urls = $AZMON_RS_PROM_URLS @@ -741,13 +741,13 @@ ## set this to `https` & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation - $AZMON_SIDECAR_PROM_MONITOR_PODS + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS monitor_kubernetes_pods_version = 2 - $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR - $AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR - fieldpass = $AZMON_SIDECAR_PROM_FIELDPASS - fielddrop = $AZMON_SIDECAR_PROM_FIELDDROP + fieldpass = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS + fielddrop = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP metric_version = 2 url_tag = "scrapeUrl" @@ -770,10 +770,10 @@ insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER +$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER ## OSM Prometheus configuration -$AZMON_SIDECAR_OSM_PROM_PLUGINS +$AZMON_TELEGRAF_OSM_PROM_PLUGINS # ##npm diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index bd5322e70..fb752de0a 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -109,10 +109,10 @@ def replaceOsmTelegrafConfigPlaceHolders end end end - tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) + tgfConfig = tgfConfig.gsub("$AZMON_TELEGRAF_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) else puts "Using 
defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" - tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") + tgfConfig = tgfConfig.gsub("$AZMON_TELEGRAF_OSM_PROM_PLUGINS", "") end File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfTestConfigFile} file in sidecar container" @@ -171,10 +171,10 @@ def replaceOsmTelegrafConfigPlaceHolders # end # end # end -# tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) +# tgfConfig = tgfConfig.gsub("$AZMON_TELEGRAF_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) # else # puts "Using defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" -# tgfConfig = tgfConfig.gsub("$AZMON_SIDECAR_OSM_PROM_PLUGINS", "") +# tgfConfig = tgfConfig.gsub("$AZMON_TELEGRAF_OSM_PROM_PLUGINS", "") # end # File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb deleted file mode 100644 index 8407f516e..000000000 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ /dev/null @@ -1,382 +0,0 @@ -#!/usr/local/bin/ruby - -require_relative "tomlrb" -require_relative "ConfigParseErrorLogger" -require "fileutils" - -@promConfigMapMountPath = "/etc/config/settings/prometheus-data-collection-settings" -@replicaset = "replicaset" -@daemonset = "daemonset" -@promSideCar = "prometheus-sidecar" -@windows = "windows" -@configSchemaVersion = "" -@defaultDsInterval = "1m" -@defaultDsPromUrls = [] -@defaultDsFieldPass = [] -@defaultDsFieldDrop = [] -@defaultRsInterval = "1m" -@defaultRsPromUrls = [] -@defaultRsFieldPass = [] -@defaultRsFieldDrop = [] 
-@defaultRsK8sServices = [] -# @defaultRsMonitorPods = false -@defaultCustomPrometheusInterval = "1m" -@defaultCustomPrometheusFieldPass = [] -@defaultCustomPrometheusFieldDrop = [] -@defaultCustomPrometheusMonitorPods = false -@defaultCustomPrometheusLabelSelectors = "" -@defaultCustomPrometheusFieldSelectors = "" - -#Configurations to be used for the auto-generated input prometheus plugins for namespace filtering -@metricVersion = 2 -@monitorKubernetesPodsVersion = 2 -@urlTag = "scrapeUrl" -@bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" -@responseTimeout = "15s" -@tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -@insecureSkipVerify = true - -# Use parser to parse the configmap toml file to a ruby structure -def parseConfigMap - begin - # Check to see if config map is created - if (File.file?(@promConfigMapMountPath)) - puts "config::configmap container-azm-ms-agentconfig for settings mounted, parsing values for prometheus config map" - parsedConfig = Tomlrb.load_file(@promConfigMapMountPath, symbolize_keys: true) - puts "config::Successfully parsed mounted prometheus config map" - return parsedConfig - else - puts "config::configmap container-azm-ms-agentconfig for settings not mounted, using defaults for prometheus scraping" - return nil - end - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config map for prometheus config: #{errorStr}, using defaults, please check config map for errors") - return nil - end -end - -def checkForTypeArray(arrayValue, arrayType) - if (arrayValue.nil? || (arrayValue.kind_of?(Array) && ((arrayValue.length == 0) || (arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType))))) - return true - else - return false - end -end - -def checkForType(variable, varType) - if variable.nil? 
|| variable.kind_of?(varType) - return true - else - return false - end -end - -def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) - begin - puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with no namespace filters" - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = \"#{kubernetesLabelSelectors}\"")) - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = \"#{kubernetesFieldSelectors}\"")) - rescue => errorStr - puts "Exception while replacing default pod monitor settings for sidecar: #{errorStr}" - end - return new_contents -end - -def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) - begin - puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with namespace filters" - - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_MONITOR_PODS") - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR") - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR") - - pluginConfigsWithNamespaces = "" 
- monitorKubernetesPodsNamespaces.each do |namespace| - if !namespace.nil? - #Stripping namespaces to remove leading and trailing whitespaces - namespace.strip! - if namespace.length > 0 - pluginConfigsWithNamespaces += "\n[[inputs.prometheus]] - interval = \"#{interval}\" - monitor_kubernetes_pods = true - monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} - monitor_kubernetes_pods_namespace = \"#{namespace}\" - kubernetes_label_selector = \"#{kubernetesLabelSelectors}\" - kubernetes_field_selector = \"#{kubernetesFieldSelectors}\" - fieldpass = #{fieldPassSetting} - fielddrop = #{fieldDropSetting} - metric_version = #{@metricVersion} - url_tag = \"#{@urlTag}\" - bearer_token = \"#{@bearerToken}\" - response_timeout = \"#{@responseTimeout}\" - tls_ca = \"#{@tlsCa}\" - insecure_skip_verify = #{@insecureSkipVerify}\n" - end - end - end - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces) - return new_contents - rescue => errorStr - puts "Exception while creating prometheus input plugins to filter namespaces in sidecar: #{errorStr}, using defaults" - replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) - end -end - -# Use the ruby structure created after config parsing to set the right values to be used as environment variables -def populateSettingValuesFromConfigMap(parsedConfig) - # Checking to see if this is the daemonset or replicaset to parse config accordingly - controller = ENV["CONTROLLER_TYPE"] - containerType = ENV["CONTAINER_TYPE"] - containerOs = ENV["CONTAINER_OS"] - if !controller.nil? - if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? - if controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
- #Get prometheus replicaset custom config settings - begin - interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] - fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] - fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] - urls = parsedConfig[:prometheus_data_collection_settings][:cluster][:urls] - kubernetesServices = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_services] - # monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] - # monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] - - # Check for the right datattypes to enforce right setting values - if checkForType(interval, String) && - checkForTypeArray(fieldPass, String) && - checkForTypeArray(fieldDrop, String) && - checkForTypeArray(kubernetesServices, String) && - checkForTypeArray(urls, String) - # (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) # Checking for Boolean type, since 'Boolean' is not defined as a type in ruby - puts "config::Successfully passed typecheck for config settings for replicaset" - #if setting is nil assign default values - interval = (interval.nil?) ? @defaultRsInterval : interval - fieldPass = (fieldPass.nil?) ? @defaultRsFieldPass : fieldPass - fieldDrop = (fieldDrop.nil?) ? @defaultRsFieldDrop : fieldDrop - kubernetesServices = (kubernetesServices.nil?) ? @defaultRsK8sServices : kubernetesServices - urls = (urls.nil?) ? @defaultRsPromUrls : urls - # monitorKubernetesPods = (monitorKubernetesPods.nil?) ? 
@defaultRsMonitorPods : monitorKubernetesPods - - file_name = "/opt/telegraf-test-rs.conf" - # Copy the telegraf config file to a temp file to run telegraf in test mode with this config - FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf", file_name) - - puts "config::Starting to substitute the placeholders in telegraf conf copy file for replicaset" - #Replace the placeholder config values with values from custom config - text = File.read(file_name) - new_contents = text.gsub("$AZMON_RS_PROM_INTERVAL", interval) - fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDPASS", fieldPassSetting) - fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDDROP", fieldDropSetting) - new_contents = new_contents.gsub("$AZMON_RS_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) - new_contents = new_contents.gsub("$AZMON_RS_PROM_K8S_SERVICES", ((kubernetesServices.length > 0) ? ("[\"" + kubernetesServices.join("\",\"") + "\"]") : "[]")) - - # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces - # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - - # - to use defaults in case of nil settings - # if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? 
&& checkForTypeArray(monitorKubernetesPodsNamespaces, String) - # new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) - # monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length - # else - # new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) - # monitorKubernetesPodsNamespacesLength = 0 - # end - - File.open(file_name, "w") { |file| file.puts new_contents } - puts "config::Successfully substituted the placeholders in telegraf conf file for replicaset" - #Set environment variables for telemetry - file = File.open("telemetry_prom_config_env_var", "w") - if !file.nil? - file.write("export TELEMETRY_RS_PROM_INTERVAL=\"#{interval}\"\n") - #Setting array lengths as environment variables for telemetry purposes - file.write("export TELEMETRY_RS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") - file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") - file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") - file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") - # file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") - # file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") - - # Close file after writing all environment variables - file.close - puts "config::Successfully created telemetry file for replicaset" - end - else - ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for replicaset, using defaults, please use right types for all settings") - end # end of type check condition - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for replicaset: #{errorStr}, using defaults") - setRsPromDefaults - puts "****************End Prometheus Config 
Processing********************" - end - elsif controller.casecmp(@daemonset) == 0 && - ((!containerType.nil? && containerType.casecmp(@promSideCar) == 0) || ((!containerOs.nil? && containerOs.casecmp(@windows) == 0))) && - !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? - #Get prometheus sidecar custom config settings for monitor kubernetes pods - begin - interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] - fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] - fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] - monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] - monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] - kubernetesLabelSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_label_selector] - kubernetesFieldSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_field_selector] - - # Check for the right datattypes to enforce right setting values - if checkForType(interval, String) && - checkForType(kubernetesLabelSelectors, String) && - checkForType(kubernetesFieldSelectors, String) && - checkForTypeArray(fieldPass, String) && - checkForTypeArray(fieldDrop, String) && - (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby - puts "config::Successfully passed typecheck for config settings for custom prometheus scraping" - #if setting is nil assign default values - interval = (interval.nil?) ? @defaultCustomPrometheusInterval : interval - fieldPass = (fieldPass.nil?) ? @defaultCustomPrometheusFieldPass : fieldPass - fieldDrop = (fieldDrop.nil?) ? 
@defaultCustomPrometheusFieldDrop : fieldDrop - monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultCustomPrometheusMonitorPods : monitorKubernetesPods - kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? @defaultCustomPrometheusLabelSelectors : kubernetesLabelSelectors - kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? @defaultCustomPrometheusFieldSelectors : kubernetesFieldSelectors - - if (!containerOs.nil? && containerOs.casecmp(@windows) == 0) - file_name = "/etc/telegraf/telegraf.conf" - else - file_name = "/opt/telegraf-test-prom-side-car.conf" - # Copy the telegraf config file to a temp file to run telegraf in test mode with this config - FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf", file_name) - end - puts "config::Starting to substitute the placeholders in telegraf conf copy file for linux or conf file for windows for custom prometheus scraping" - #Replace the placeholder config values with values from custom config - text = File.read(file_name) - new_contents = text.gsub("$AZMON_SIDECAR_PROM_INTERVAL", interval) - fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDPASS", fieldPassSetting) - fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" - #new_contents = new_contents.gsub("$AZMON_SIDECAR_PROM_FIELDDROP", fieldDropSetting) - - # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces - # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - - # - to use defaults in case of nil settings - monitorKubernetesPodsNSConfig = [] - if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? 
&& checkForTypeArray(monitorKubernetesPodsNamespaces, String) - # Adding a check to see if an empty array is passed for kubernetes namespaces - if (monitorKubernetesPodsNamespaces.length > 0) - new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) - monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length - monitorKubernetesPodsNSConfig = monitorKubernetesPodsNamespaces - else - new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) - monitorKubernetesPodsNamespacesLength = 0 - end - else - new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) - monitorKubernetesPodsNamespacesLength = 0 - end - - # Label and field selectors are passed as strings. For field selectors, split by commas to get the number of key-value pairs. - # Label selectors can be formatted as "app in (app1, app2, app3)", so split by commas only outside parentheses to get the number of key-value pairs. - kubernetesLabelSelectorsLength = kubernetesLabelSelectors.split(/,\s*(?=[^()]*(?:\(|$))/).length - kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(",").length - - File.open(file_name, "w") { |file| file.puts new_contents } - puts "config::Successfully substituted the placeholders in telegraf conf file for custom prometheus scraping" - #Set environment variables for telemetry - file = File.open("telemetry_prom_config_env_var", "w") - if !file.nil? 
- #Setting array lengths as environment variables for telemetry purposes - file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") - file.write("export TELEMETRY_SIDECAR_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") - file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") - file.write("export TELEMETRY_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") - - # Close file after writing all environment variables - file.close - puts "config::Successfully created telemetry file for prometheus sidecar" - end - else - ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for prometheus side car, using defaults, please use right types for all settings") - end # end of type check condition - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for promethues side car: #{errorStr}, using defaults") - # look into this - #setRsPromDefaults - puts "****************End Prometheus Config Processing********************" - end - elsif controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? 
- #Get prometheus daemonset custom config settings - begin - interval = parsedConfig[:prometheus_data_collection_settings][:node][:interval] - fieldPass = parsedConfig[:prometheus_data_collection_settings][:node][:fieldpass] - fieldDrop = parsedConfig[:prometheus_data_collection_settings][:node][:fielddrop] - urls = parsedConfig[:prometheus_data_collection_settings][:node][:urls] - - # Check for the right datattypes to enforce right setting values - if checkForType(interval, String) && - checkForTypeArray(fieldPass, String) && - checkForTypeArray(fieldDrop, String) && - checkForTypeArray(urls, String) - puts "config::Successfully passed typecheck for config settings for daemonset" - - #if setting is nil assign default values - interval = (interval.nil?) ? @defaultDsInterval : interval - fieldPass = (fieldPass.nil?) ? @defaultDsFieldPass : fieldPass - fieldDrop = (fieldDrop.nil?) ? @defaultDsFieldDrop : fieldDrop - urls = (urls.nil?) ? @defaultDsPromUrls : urls - - file_name = "/opt/telegraf-test.conf" - # Copy the telegraf config file to a temp file to run telegraf in test mode with this config - FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf.conf", file_name) - - puts "config::Starting to substitute the placeholders in telegraf conf copy file for daemonset" - #Replace the placeholder config values with values from custom config - text = File.read(file_name) - new_contents = text.gsub("$AZMON_DS_PROM_INTERVAL", interval) - new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDPASS", ((fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]")) - new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDDROP", ((fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]")) - new_contents = new_contents.gsub("$AZMON_DS_PROM_URLS", ((urls.length > 0) ? 
("[\"" + urls.join("\",\"") + "\"]") : "[]")) - File.open(file_name, "w") { |file| file.puts new_contents } - puts "config::Successfully substituted the placeholders in telegraf conf file for daemonset" - - #Set environment variables for telemetry - file = File.open("telemetry_prom_config_env_var", "w") - if !file.nil? - file.write("export TELEMETRY_DS_PROM_INTERVAL=\"#{interval}\"\n") - #Setting array lengths as environment variables for telemetry purposes - file.write("export TELEMETRY_DS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") - file.write("export TELEMETRY_DS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") - file.write("export TELEMETRY_DS_PROM_URLS_LENGTH=#{urls.length}\n") - # Close file after writing all environment variables - file.close - puts "config::Successfully created telemetry file for daemonset" - end - else - ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for daemonset, using defaults, please use right types for all settings") - end # end of type check condition - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for daemonset: #{errorStr}, using defaults, please check correctness of configmap") - puts "****************End Prometheus Config Processing********************" - end - end # end of controller type check - end - else - ConfigParseErrorLogger.logError("Controller undefined while processing prometheus config, using defaults") - end -end - -@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] -puts "****************Start Prometheus Config Processing********************" -if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it - configMapSettings = parseConfigMap - if !configMapSettings.nil? 
- populateSettingValuesFromConfigMap(configMapSettings) - end -else - if (File.file?(@promConfigMapMountPath)) - ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported version") - else - puts "config::No configmap mounted for prometheus custom config, using defaults" - end -end -puts "****************End Prometheus Config Processing********************" diff --git a/build/windows/installer/conf/telegraf.conf b/build/windows/installer/conf/telegraf.conf index 345394ae4..be2ddfced 100644 --- a/build/windows/installer/conf/telegraf.conf +++ b/build/windows/installer/conf/telegraf.conf @@ -727,27 +727,21 @@ #Prometheus Custom Metrics [[inputs.prometheus]] - interval = "$AZMON_TELEGRAF_PROM_INTERVAL" - - ## An array of urls to scrape metrics from. - # urls = $AZMON_RS_PROM_URLS - - # ## An array of Kubernetes services to scrape metrics from. - # kubernetes_services = $AZMON_RS_PROM_K8S_SERVICES - + interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" + ## Scrape Kubernetes pods for the following prometheus annotations: ## - prometheus.io/scrape: Enable scraping for this pod ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to ## set this to `https` & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
## - prometheus.io/port: If port is not 9102 use this annotation - $AZMON_TELEGRAF_PROM_MONITOR_PODS + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS monitor_kubernetes_pods_version = 2 - $AZMON_TELEGRAF_PROM_KUBERNETES_LABEL_SELECTOR - $AZMON_TELEGRAF_PROM_KUBERNETES_FIELD_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR - fieldpass = $AZMON_TELEGRAF_PROM_FIELDPASS - fielddrop = $AZMON_TELEGRAF_PROM_FIELDDROP + fieldpass = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS + fielddrop = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP metric_version = 2 url_tag = "scrapeUrl" @@ -770,7 +764,7 @@ insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -$AZMON_TELEGRAF_PROM_PLUGINS_WITH_NAMESPACE_FILTER +$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER # ##npm # [[inputs.prometheus]] diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar index 6ceb28516..6c9d63ef7 100644 --- a/kubernetes/linux/defaultpromenvvariables-sidecar +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -1,8 +1,8 @@ -export AZMON_SIDECAR_PROM_INTERVAL="1m" -export AZMON_SIDECAR_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" -export AZMON_SIDECAR_PROM_FIELDPASS="[]" -export AZMON_SIDECAR_PROM_FIELDDROP="[]" -export AZMON_SIDECAR_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" -export AZMON_SIDECAR_OSM_PROM_PLUGINS="" -export AZMON_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = \"\"" -export AZMON_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = \"\"" +export AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL="1m" +export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" +export 
AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export AZMON_TELEGRAF_OSM_PROM_PLUGINS="" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = \"\"" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = \"\"" diff --git a/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 index 4e570c44c..272de23f1 100644 --- a/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 +++ b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 @@ -1,15 +1,15 @@ -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_INTERVAL", "1m", "process") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_INTERVAL", "1m", "machine") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "process") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "machine") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_FIELDPASS", "[]", "process") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_FIELDPASS", "[]", "machine") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_FIELDDROP", "[]", "process") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_FIELDDROP", "[]", "machine") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_PLUGINS_WITH_NAMESPACE_FILTER", " ", "process") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_PLUGINS_WITH_NAMESPACE_FILTER", " ", "machine") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_KUBERNETES_LABEL_SELECTOR", "kubernetes_label_selector = ''", "process") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_KUBERNETES_LABEL_SELECTOR", "kubernetes_label_selector = ''", "machine") 
-[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_KUBERNETES_FIELD_SELECTOR", "kubernetes_field_selector = ''", "process") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_PROM_KUBERNETES_FIELD_SELECTOR", "kubernetes_field_selector = ''", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", "1m", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", "1m", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", "[]", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", "[]", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", "[]", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", "[]", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", " ", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", " ", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "kubernetes_label_selector = ''", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "kubernetes_label_selector = ''", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "kubernetes_field_selector = ''", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "kubernetes_field_selector = ''", "machine") 
diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 644cbe45a..823feebcb 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -79,9 +79,9 @@ const ( defaultTelemetryPushIntervalSeconds = 300 - eventNameContainerLogInit = "ContainerLogPluginInitialized" - eventNameDaemonSetHeartbeat = "ContainerLogDaemonSetHeartbeatEvent" - eventNamePrometheusSidecarHeartbeat = "PrometheusSidecarHeartbeatEvent" + eventNameContainerLogInit = "ContainerLogPluginInitialized" + eventNameDaemonSetHeartbeat = "ContainerLogDaemonSetHeartbeatEvent" + eventNameCustomPrometheusSidecarHeartbeat = "CustomPrometheusSidecarHeartbeatEvent" ) // SendContainerLogPluginMetrics is a go-routine that flushes the data periodically (every 5 mins to App Insights) @@ -137,21 +137,21 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheus-sidecar") == 0 { telemetryDimensions := make(map[string]string) telemetryDimensions["ContainerType"] = "prometheus-sidecar" - telemetryDimensions["SidecarPromMonitorPods"] = promMonitorPods + telemetryDimensions["CustomPromMonitorPods"] = promMonitorPods if promMonitorPodsNamespaceLength > 0 { - telemetryDimensions["SidecarPromMonitorPodsNamespaceLength"] = strconv.Itoa(promMonitorPodsNamespaceLength) + telemetryDimensions["CustomPromMonitorPodsNamespaceLength"] = strconv.Itoa(promMonitorPodsNamespaceLength) } if promMonitorPodsLabelSelectorLength > 0 { - telemetryDimensions["SidecarPromMonitorPodsLabelSelectorLength"] = strconv.Itoa(promMonitorPodsLabelSelectorLength) + telemetryDimensions["CustomPromMonitorPodsLabelSelectorLength"] = strconv.Itoa(promMonitorPodsLabelSelectorLength) } if promMonitorPodsFieldSelectorLength > 0 { - telemetryDimensions["SidecarPromMonitorPodsFieldSelectorLength"] = strconv.Itoa(promMonitorPodsFieldSelectorLength) + 
telemetryDimensions["CustomPromMonitorPodsFieldSelectorLength"] = strconv.Itoa(promMonitorPodsFieldSelectorLength) } if osmNamespaceCount > 0 { telemetryDimensions["OsmNamespaceCount"] = strconv.Itoa(osmNamespaceCount) } - SendEvent(eventNamePrometheusSidecarHeartbeat, telemetryDimensions) + SendEvent(eventNameCustomPrometheusSidecarHeartbeat, telemetryDimensions) } else { SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) @@ -306,24 +306,24 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { Log("OSM namespace count string to int conversion error %s", err.Error()) OSMNamespaceCount = 0 } - PromMonitorPods = os.Getenv("TELEMETRY_SIDECAR_PROM_MONITOR_PODS") - promMonPodsNamespaceLength := os.Getenv("TELEMETRY_SIDECAR_PROM_MONITOR_PODS_NS_LENGTH") + PromMonitorPods = os.Getenv("TELEMETRY_CUSTOM_PROM_MONITOR_PODS") + promMonPodsNamespaceLength := os.Getenv("TELEMETRY_CUSTOM_PROM_MONITOR_PODS_NS_LENGTH") PromMonitorPodsNamespaceLength, err = strconv.Atoi(promMonPodsNamespaceLength) if err != nil { - Log("Prometheus sidecar monitor kubernetes pods namespace count string to int conversion error %s", err.Error()) + Log("Custom prometheus monitor kubernetes pods namespace count string to int conversion error %s", err.Error()) PromMonitorPodsNamespaceLength = 0 } - promLabelSelectorLength := os.Getenv("TELEMETRY_SIDECAR_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH") + promLabelSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH") PromMonitorPodsLabelSelectorLength, err = strconv.Atoi(promLabelSelectorLength) if err != nil { - Log("Prometheus sidecar label selector count string to int conversion error %s", err.Error()) + Log("Custom prometheus label selector count string to int conversion error %s", err.Error()) PromMonitorPodsLabelSelectorLength = 0 } - promFieldSelectorLength := os.Getenv("TELEMETRY_SIDECAR_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH") + promFieldSelectorLength := 
os.Getenv("TELEMETRY_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH") PromMonitorPodsFieldSelectorLength, err = strconv.Atoi(promFieldSelectorLength) if err != nil { - Log("Prometheus sidecar field selector count string to int conversion error %s", err.Error()) + Log("Custom prometheus field selector count string to int conversion error %s", err.Error()) PromMonitorPodsFieldSelectorLength = 0 } From f6fc4a46ff962cc63c7ba0ff4619df953312e418 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 24 Feb 2021 18:54:07 -0800 Subject: [PATCH 126/175] fixinf defaults --- kubernetes/linux/defaultpromenvvariables-sidecar | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar index 6c9d63ef7..be3a128f6 100644 --- a/kubernetes/linux/defaultpromenvvariables-sidecar +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -4,5 +4,5 @@ export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" export AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" export AZMON_TELEGRAF_OSM_PROM_PLUGINS="" -export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = \"\"" -export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = \"\"" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = ''" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = ''" From b9284548063f0ea99dfa0fc54a842c0f951c7b30 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 25 Feb 2021 12:24:55 -0800 Subject: [PATCH 127/175] script changes for windows --- .../scripts/tomlparser-prom-customconfig.rb | 15 +++++++++++---- kubernetes/omsagent.yaml | 4 ++-- kubernetes/windows/Dockerfile | 1 + 3 files changed, 14 insertions(+), 6 deletions(-) diff --git 
a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index fe360e296..ee34d2bb0 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -1,6 +1,12 @@ #!/usr/local/bin/ruby -require_relative "tomlrb" +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + require "tomlrb" +else + require_relative "tomlrb" +end +# require_relative "tomlrb" require_relative "ConfigParseErrorLogger" require "fileutils" @@ -129,7 +135,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Checking to see if this is the daemonset or replicaset to parse config accordingly controller = ENV["CONTROLLER_TYPE"] containerType = ENV["CONTAINER_TYPE"] - containerOs = ENV["CONTAINER_OS"] + # containerOs = ENV["CONTAINER_OS"] if !controller.nil? if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? if controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? @@ -212,7 +218,8 @@ def populateSettingValuesFromConfigMap(parsedConfig) puts "****************End Prometheus Config Processing********************" end elsif controller.casecmp(@daemonset) == 0 && - ((!containerType.nil? && containerType.casecmp(@promSideCar) == 0) || ((!containerOs.nil? && containerOs.casecmp(@windows) == 0))) && + ((!containerType.nil? && containerType.casecmp(@promSideCar) == 0) || + (!@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0)) && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? #Get prometheus custom config settings for monitor kubernetes pods begin @@ -240,7 +247,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? 
@defaultCustomPrometheusLabelSelectors : kubernetesLabelSelectors kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? @defaultCustomPrometheusFieldSelectors : kubernetesFieldSelectors - if (!containerOs.nil? && containerOs.casecmp(@windows) == 0) + if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 file_name = "/etc/telegraf/telegraf.conf" else file_name = "/opt/telegraf-test-prom-side-car.conf" diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index a026ad2a6..5de4ec17b 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -783,8 +783,8 @@ spec: # value: "my_acs_cluster_name" - name: CONTROLLER_TYPE value: "DaemonSet" - - name: CONTAINER_OS - value: "Windows" + # - name: CONTAINER_OS + # value: "Windows" - name: HOSTNAME valueFrom: fieldRef: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index ac7094212..d359bd94f 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -47,6 +47,7 @@ RUN ./setup.ps1 COPY main.ps1 /opt/omsagentwindows/scripts/powershell COPY ./omsagentwindows/installer/scripts/filesystemwatcher.ps1 /opt/omsagentwindows/scripts/powershell COPY ./omsagentwindows/installer/scripts/livenessprobe.cmd /opt/omsagentwindows/scripts/cmd/ +COPY setdefaulttelegrafenvvariables.ps1 /opt/omsagentwindows/scripts/powershell # copy ruby scripts to /opt folder COPY ./omsagentwindows/installer/scripts/*.rb /opt/omsagentwindows/scripts/ruby/ From a1e5db47433954dcc384507ba3eb52c0d14808f3 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 25 Feb 2021 12:25:44 -0800 Subject: [PATCH 128/175] adding comment --- build/common/installer/scripts/tomlparser-prom-customconfig.rb | 1 + 1 file changed, 1 insertion(+) diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index ee34d2bb0..9560263cd 100644 --- 
a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -1,5 +1,6 @@ #!/usr/local/bin/ruby +#this should be require relative in Linux and require in windows, since it is a gem install on windows @os_type = ENV["OS_TYPE"] if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 require "tomlrb" From 56b524ec21d19d9f0aa002040dc9d983219acce8 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 26 Feb 2021 14:48:10 -0800 Subject: [PATCH 129/175] add kube.conf check to all places in main.sh --- kubernetes/linux/main.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index fc1a36f18..e6f002a37 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -248,7 +248,7 @@ done source integration_npm_config_env_var #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset -if [ ! -e "/etc/config/kube.conf" ]; then +if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "Prometheus-Sidecar" ]; then /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb fi @@ -563,7 +563,7 @@ fi #start oneagent -if [ ! -e "/etc/config/kube.conf" ]; then +if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "Prometheus-Sidecar" ]; then if [ ! 
-z $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE ]; then echo "container logs configmap route is $AZMON_CONTAINER_LOGS_ROUTE" echo "container logs effective route is $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" From 6197e42dde8c29ed3819365631831dceecba1832 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 26 Feb 2021 17:09:23 -0800 Subject: [PATCH 130/175] binding diff port for monitoring agent conf --- kubernetes/linux/main.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index e6f002a37..748292bb6 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -5,6 +5,10 @@ if [ -e "/etc/config/kube.conf" ]; then elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then echo "rashmi-in-ds-prom-omsagent-conf" cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf + # omsadmin.sh replaces %MONITOR_AGENT_PORT% in the monitor.conf with default port 25324. Since we are running 2 omsagents in the same pod, + # we need to use a different port for the sidecar, else we will see the Address already in use - bind(2) for 0.0.0.0:25324 error. + # Look into omsadmin.sh scripts's configure_monitor_agent() and find_available_port() methods for more info. 
+ sed -i -e 's/port %MONITOR_AGENT_PORT%/port 25325/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf else echo "rashmi-in-ds-omsagent-conf" sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf From 62528cd6d67e4e6e3cd108c03f33d497f498068a Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 26 Feb 2021 19:17:50 -0800 Subject: [PATCH 131/175] update syslog port --- kubernetes/linux/main.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 748292bb6..3f4c5683d 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -5,10 +5,12 @@ if [ -e "/etc/config/kube.conf" ]; then elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then echo "rashmi-in-ds-prom-omsagent-conf" cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf - # omsadmin.sh replaces %MONITOR_AGENT_PORT% in the monitor.conf with default port 25324. Since we are running 2 omsagents in the same pod, - # we need to use a different port for the sidecar, else we will see the Address already in use - bind(2) for 0.0.0.0:25324 error. - # Look into omsadmin.sh scripts's configure_monitor_agent() and find_available_port() methods for more info. + # omsadmin.sh replaces %MONITOR_AGENT_PORT% and %SYSLOG_PORT% in the monitor.conf and syslog.conf with default ports 25324 and 25224. + # Since we are running 2 omsagents in the same pod, we need to use a different port for the sidecar, + # else we will see the Address already in use - bind(2) for 0.0.0.0:253(2)24 error. + # Look into omsadmin.sh scripts's configure_monitor_agent()/configure_syslog() and find_available_port() methods for more info. 
sed -i -e 's/port %MONITOR_AGENT_PORT%/port 25325/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf + sed -i -e 's/port %SYSLOG_PORT%/port 25225/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf else echo "rashmi-in-ds-omsagent-conf" sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf From 62c6ef6037045bcbb26e63cc698c8cbef5209cd7 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 26 Feb 2021 19:24:59 -0800 Subject: [PATCH 132/175] fixing typo --- kubernetes/linux/main.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 3f4c5683d..cf15f44f8 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -10,7 +10,7 @@ elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then # else we will see the Address already in use - bind(2) for 0.0.0.0:253(2)24 error. # Look into omsadmin.sh scripts's configure_monitor_agent()/configure_syslog() and find_available_port() methods for more info. 
sed -i -e 's/port %MONITOR_AGENT_PORT%/port 25325/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf - sed -i -e 's/port %SYSLOG_PORT%/port 25225/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf + sed -i -e 's/port %SYSLOG_PORT%/port 25225/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf else echo "rashmi-in-ds-omsagent-conf" sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf From 258a2d6e4caebad9f894b8920c7bfb270d5254a1 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 1 Mar 2021 12:23:30 -0800 Subject: [PATCH 133/175] updating ports --- kubernetes/linux/main.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index cf15f44f8..fc4673ccb 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -9,8 +9,8 @@ elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then # Since we are running 2 omsagents in the same pod, we need to use a different port for the sidecar, # else we will see the Address already in use - bind(2) for 0.0.0.0:253(2)24 error. # Look into omsadmin.sh scripts's configure_monitor_agent()/configure_syslog() and find_available_port() methods for more info. 
- sed -i -e 's/port %MONITOR_AGENT_PORT%/port 25325/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf - sed -i -e 's/port %SYSLOG_PORT%/port 25225/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf + sed -i -e 's/port %MONITOR_AGENT_PORT%/port 25326/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf + sed -i -e 's/port %SYSLOG_PORT%/port 25226/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf else echo "rashmi-in-ds-omsagent-conf" sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf From be4fa525c13d805b96924983fea5a70eb03acb19 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 1 Mar 2021 14:10:14 -0800 Subject: [PATCH 134/175] adding windows telemetry --- source/plugins/go/src/telemetry.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 823feebcb..2e4bdaa07 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -82,6 +82,7 @@ const ( eventNameContainerLogInit = "ContainerLogPluginInitialized" eventNameDaemonSetHeartbeat = "ContainerLogDaemonSetHeartbeatEvent" eventNameCustomPrometheusSidecarHeartbeat = "CustomPrometheusSidecarHeartbeatEvent" + eventNameWindowsFluentBitHeartbeat = "WindowsFluentBitHeartbeatEvent" ) // SendContainerLogPluginMetrics is a go-routine that flushes the data periodically (every 5 mins to App Insights) @@ -153,6 +154,8 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { SendEvent(eventNameCustomPrometheusSidecarHeartbeat, telemetryDimensions) + } else if strings.Compare(strings.ToLower(os.Getenv("OS_TYPE")), "windows") == 0 { + SendEvent(eventNameWindowsFluentBitHeartbeat, make(map[string]string)) } else { SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) From 
a834912d9a30a74447db8d6309a2b69368752f21 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 2 Mar 2021 17:44:41 -0800 Subject: [PATCH 135/175] adding some phased rollout logic --- .../scripts/tomlparser-prom-customconfig.rb | 10 ++++----- build/linux/installer/conf/telegraf-rs.conf | 21 ++++++++++++------- kubernetes/linux/defaultpromenvvariables-rs | 18 +++++++++------- 3 files changed, 30 insertions(+), 19 deletions(-) diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 9560263cd..18190fd70 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -173,13 +173,13 @@ def populateSettingValuesFromConfigMap(parsedConfig) puts "config::Starting to substitute the placeholders in telegraf conf copy file for replicaset" #Replace the placeholder config values with values from custom config text = File.read(file_name) - new_contents = text.gsub("$AZMON_RS_PROM_INTERVAL", interval) + new_contents = text.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", interval) fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDPASS", fieldPassSetting) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", fieldPassSetting) fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDDROP", fieldDropSetting) - new_contents = new_contents.gsub("$AZMON_RS_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) - new_contents = new_contents.gsub("$AZMON_RS_PROM_K8S_SERVICES", ((kubernetesServices.length > 0) ? 
("[\"" + kubernetesServices.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", fieldDropSetting) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_K8S_SERVICES", ((kubernetesServices.length > 0) ? ("[\"" + kubernetesServices.join("\",\"") + "\"]") : "[]")) # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index b63bbac22..155641484 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -540,13 +540,13 @@ #Prometheus Custom Metrics [[inputs.prometheus]] - interval = "$AZMON_RS_PROM_INTERVAL" + interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" ## An array of urls to scrape metrics from. - urls = $AZMON_RS_PROM_URLS + urls = $AZMON_TELEGRAF_CUSTOM_PROM_URLS ## An array of Kubernetes services to scrape metrics from. - kubernetes_services = $AZMON_RS_PROM_K8S_SERVICES + kubernetes_services = $AZMON_TELEGRAF_CUSTOM_PROM_K8S_SERVICES ## Scrape Kubernetes pods for the following prometheus annotations: ## - prometheus.io/scrape: Enable scraping for this pod @@ -554,10 +554,13 @@ ## set this to `https` & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
## - prometheus.io/port: If port is not 9102 use this annotation - # $AZMON_RS_PROM_MONITOR_PODS + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS - fieldpass = $AZMON_RS_PROM_FIELDPASS - fielddrop = $AZMON_RS_PROM_FIELDDROP + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR + + fieldpass = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS + fielddrop = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP metric_version = 2 url_tag = "scrapeUrl" @@ -581,7 +584,11 @@ insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -# $AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER +$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER + +## OSM Prometheus configuration +$AZMON_TELEGRAF_OSM_PROM_PLUGINS + # [[inputs.exec]] # ## Commands array # interval = "15m" diff --git a/kubernetes/linux/defaultpromenvvariables-rs b/kubernetes/linux/defaultpromenvvariables-rs index 1346e62b9..46256d625 100644 --- a/kubernetes/linux/defaultpromenvvariables-rs +++ b/kubernetes/linux/defaultpromenvvariables-rs @@ -1,7 +1,11 @@ -export AZMON_RS_PROM_INTERVAL="1m" -export AZMON_RS_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" -export AZMON_RS_PROM_FIELDPASS="[]" -export AZMON_RS_PROM_FIELDDROP="[]" -export AZMON_RS_PROM_URLS="[]" -export AZMON_RS_PROM_K8S_SERVICES="[]" -export AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL="1m" +export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_URLS="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_K8S_SERVICES="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export AZMON_TELEGRAF_OSM_PROM_PLUGINS="" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = ''" 
+export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = ''" + From 0e13323c48272573a0020b47b0a7984dbc4494b6 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Mar 2021 17:59:06 -0800 Subject: [PATCH 136/175] phased rollout changes --- .../scripts/tomlparser-prom-customconfig.rb | 85 +++++++++++++------ .../conf/telegraf-prom-side-car.conf | 3 +- build/linux/installer/conf/telegraf-rs.conf | 1 + .../scripts/tomlparser-osm-config.rb | 44 +--------- build/windows/installer/conf/telegraf.conf | 2 +- kubernetes/linux/defaultpromenvvariables-rs | 1 + .../linux/defaultpromenvvariables-sidecar | 1 + kubernetes/linux/main.sh | 8 -- kubernetes/omsagent.yaml | 3 + source/plugins/go/src/telemetry.go | 4 +- source/plugins/ruby/in_kube_nodes.rb | 8 ++ 11 files changed, 79 insertions(+), 81 deletions(-) diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 18190fd70..e66c6a7ae 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -43,6 +43,11 @@ @tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" @insecureSkipVerify = true +# Checking to see if this is the daemonset or replicaset to parse config accordingly +@controller = ENV["CONTROLLER_TYPE"] +@containerType = ENV["CONTAINER_TYPE"] +@sidecarScrapingEnabled = ENV["SIDECAR_SCRAPING_ENABLED"] + # Use parser to parse the configmap toml file to a ruby structure def parseConfigMap begin @@ -82,6 +87,7 @@ def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubern begin puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with no namespace filters" new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) + new_contents = 
new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE", ("pod_scrape_scope = #{(@controller.casecmp(@replicaset) == 0) ? "cluster" : "node"}")) new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = \"#{kubernetesLabelSelectors}\"")) new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = \"#{kubernetesFieldSelectors}\"")) @@ -108,7 +114,7 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu pluginConfigsWithNamespaces += "\n[[inputs.prometheus]] interval = \"#{interval}\" monitor_kubernetes_pods = true - monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} + pod_scrape_scope = #{(@controller.casecmp(@replicaset) == 0) ? "cluster" : "node"} monitor_kubernetes_pods_namespace = \"#{namespace}\" kubernetes_label_selector = \"#{kubernetesLabelSelectors}\" kubernetes_field_selector = \"#{kubernetesFieldSelectors}\" @@ -133,13 +139,10 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu # Use the ruby structure created after config parsing to set the right values to be used as environment variables def populateSettingValuesFromConfigMap(parsedConfig) - # Checking to see if this is the daemonset or replicaset to parse config accordingly - controller = ENV["CONTROLLER_TYPE"] - containerType = ENV["CONTAINER_TYPE"] # containerOs = ENV["CONTAINER_OS"] - if !controller.nil? + if !@controller.nil? if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? - if controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? + if @controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
#Get prometheus replicaset custom config settings begin interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] @@ -147,16 +150,23 @@ def populateSettingValuesFromConfigMap(parsedConfig) fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] urls = parsedConfig[:prometheus_data_collection_settings][:cluster][:urls] kubernetesServices = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_services] - # monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] - # monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + + # Remove below 4 lines after phased rollout + monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] + monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + kubernetesLabelSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_label_selector] + kubernetesFieldSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_field_selector] # Check for the right datattypes to enforce right setting values if checkForType(interval, String) && checkForTypeArray(fieldPass, String) && checkForTypeArray(fieldDrop, String) && checkForTypeArray(kubernetesServices, String) && - checkForTypeArray(urls, String) - # (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) # Checking for Boolean type, since 'Boolean' is not defined as a type in ruby + checkForTypeArray(urls, String) && + # Remove below check after phased rollout + checkForType(kubernetesLabelSelectors, String) && + checkForType(kubernetesFieldSelectors, String) && + (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? 
&& (!!monitorKubernetesPods == monitorKubernetesPods))) # Checking for Boolean type, since 'Boolean' is not defined as a type in ruby puts "config::Successfully passed typecheck for config settings for replicaset" #if setting is nil assign default values interval = (interval.nil?) ? @defaultRsInterval : interval @@ -164,7 +174,10 @@ def populateSettingValuesFromConfigMap(parsedConfig) fieldDrop = (fieldDrop.nil?) ? @defaultRsFieldDrop : fieldDrop kubernetesServices = (kubernetesServices.nil?) ? @defaultRsK8sServices : kubernetesServices urls = (urls.nil?) ? @defaultRsPromUrls : urls - # monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultRsMonitorPods : monitorKubernetesPods + # Remove below lines after phased rollout + monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultRsMonitorPods : monitorKubernetesPods + kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? @defaultCustomPrometheusLabelSelectors : kubernetesLabelSelectors + kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? @defaultCustomPrometheusFieldSelectors : kubernetesFieldSelectors file_name = "/opt/telegraf-test-rs.conf" # Copy the telegraf config file to a temp file to run telegraf in test mode with this config @@ -184,13 +197,28 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - # - to use defaults in case of nil settings - # if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? 
&& checkForTypeArray(monitorKubernetesPodsNamespaces, String) - # new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) - # monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length - # else - # new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) - # monitorKubernetesPodsNamespacesLength = 0 - # end + # Remove below block after phased rollout + if (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && @sidecarScrapingEnabled.casecmp("false") == 0)) + monitorKubernetesPodsNSConfig = [] + if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? && checkForTypeArray(monitorKubernetesPodsNamespaces, String) + # Adding a check to see if an empty array is passed for kubernetes namespaces + if (monitorKubernetesPodsNamespaces.length > 0) + new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + monitorKubernetesPodsNSConfig = monitorKubernetesPodsNamespaces + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + # Label and field selectors are passed as strings. For field selectors, split by commas to get the number of key-value pairs. + # Label selectors can be formatted as "app in (app1, app2, app3)", so split by commas only outside parentheses to get the number of key-value pairs. 
+ kubernetesLabelSelectorsLength = kubernetesLabelSelectors.split(/,\s*(?=[^()]*(?:\(|$))/).length + kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(",").length + end File.open(file_name, "w") { |file| file.puts new_contents } puts "config::Successfully substituted the placeholders in telegraf conf file for replicaset" @@ -203,8 +231,13 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") - # file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") - # file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + # Remove below block after phased rollout + if (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && @sidecarScrapingEnabled.casecmp("false") == 0)) + file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + file.write("export TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") + file.write("export TELEMETRY_RS_PROM_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") + end # Close file after writing all environment variables file.close @@ -218,8 +251,8 @@ def populateSettingValuesFromConfigMap(parsedConfig) setRsPromDefaults puts "****************End Prometheus Config Processing********************" end - elsif controller.casecmp(@daemonset) == 0 && - ((!containerType.nil? && containerType.casecmp(@promSideCar) == 0) || + elsif @controller.casecmp(@daemonset) == 0 && + ((!@containerType.nil? && @containerType.casecmp(@promSideCar) == 0) || (!@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0)) && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? #Get prometheus custom config settings for monitor kubernetes pods @@ -291,14 +324,14 @@ def populateSettingValuesFromConfigMap(parsedConfig) File.open(file_name, "w") { |file| file.puts new_contents } puts "config::Successfully substituted the placeholders in telegraf conf file for custom prometheus scraping" #Set environment variables for telemetry in the sidecar container - if (!containerType.nil? && containerType.casecmp(@promSideCar) == 0) + if (!@containerType.nil? && @containerType.casecmp(@promSideCar) == 0) file = File.open("telemetry_prom_config_env_var", "w") if !file.nil? #Setting array lengths as environment variables for telemetry purposes file.write("export TELEMETRY_CUSTOM_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") file.write("export TELEMETRY_CUSTOM_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") - file.write("export TELEMETRY_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") - file.write("export TELEMETRY_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") # Close file after writing all environment variables file.close @@ -314,7 +347,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) #setRsPromDefaults puts "****************End Prometheus Config Processing********************" end - elsif controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? + elsif @controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? 
#Get prometheus daemonset custom config settings begin interval = parsedConfig[:prometheus_data_collection_settings][:node][:interval] diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 585b7b423..862c932a1 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -742,7 +742,8 @@ ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS - monitor_kubernetes_pods_version = 2 + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index 155641484..4d6079145 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -555,6 +555,7 @@ ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index fb752de0a..0421b12ac 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -1,13 +1,6 @@ #!/usr/local/bin/ruby -#this should be require relative in Linux and require in windows, since it is a gem install on windows -@os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end - +require_relative "tomlrb" require "fileutils" require_relative "ConfigParseErrorLogger" @@ -145,41 +138,6 @@ def replaceOsmTelegrafConfigPlaceHolders replaceOsmTelegrafConfigPlaceHolders() -# #replace place holders in configuration file -# tgfConfig = File.read(@tgfTestConfigFile) #read returns only after closing the file - -# if @osmMetricNamespaces.length > 0 -# osmPluginConfigsWithNamespaces = "" -# @osmMetricNamespaces.each do |namespace| -# if !namespace.nil? -# #Stripping namespaces to remove leading and trailing whitespaces -# namespace.strip! -# if namespace.length > 0 -# osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] -# name_prefix=\"container.azm.ms.osm/\" -# interval = \"#{@scrapeInterval}\" -# monitor_kubernetes_pods = true -# monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} -# monitor_kubernetes_pods_namespace = \"#{namespace}\" -# fieldpass = #{@fieldPassSetting} -# metric_version = #{@metricVersion} -# url_tag = \"#{@urlTag}\" -# bearer_token = \"#{@bearerToken}\" -# response_timeout = \"#{@responseTimeout}\" -# tls_ca = \"#{@tlsCa}\" -# insecure_skip_verify = #{@insecureSkipVerify}\n" -# end -# end -# end -# tgfConfig = tgfConfig.gsub("$AZMON_TELEGRAF_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) -# else -# puts "Using defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" -# tgfConfig = tgfConfig.gsub("$AZMON_TELEGRAF_OSM_PROM_PLUGINS", "") -# end - -# File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope -# puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfTestConfigFile} file in sidecar container" - # Write the telemetry to file, so that they can be set as environment variables telemetryFile = File.open("integration_osm_config_env_var", "w") diff --git 
a/build/windows/installer/conf/telegraf.conf b/build/windows/installer/conf/telegraf.conf index be2ddfced..7bdfd0a9a 100644 --- a/build/windows/installer/conf/telegraf.conf +++ b/build/windows/installer/conf/telegraf.conf @@ -736,7 +736,7 @@ ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS - monitor_kubernetes_pods_version = 2 + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR diff --git a/kubernetes/linux/defaultpromenvvariables-rs b/kubernetes/linux/defaultpromenvvariables-rs index 46256d625..9bf570e3a 100644 --- a/kubernetes/linux/defaultpromenvvariables-rs +++ b/kubernetes/linux/defaultpromenvvariables-rs @@ -1,5 +1,6 @@ export AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL="1m" export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" +export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE="pod_scrape_scope = 'cluster'" export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" export AZMON_TELEGRAF_CUSTOM_PROM_URLS="[]" diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar index be3a128f6..84a0c56aa 100644 --- a/kubernetes/linux/defaultpromenvvariables-sidecar +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -1,5 +1,6 @@ export AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL="1m" export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" +export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE="pod_scrape_scope = 'node'" export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" export AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index fc4673ccb..9ac932609 100644 --- 
a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -335,14 +335,6 @@ if [ ! -e "/etc/config/kube.conf" ]; then echo $line >> ~/.bashrc done source integration_osm_config_env_var - - #Sourcing prometheus side car config settings if it exists - # if [ -e "prom_config_shared_settings_env_var" ]; then - # cat prom_config_shared_settings_env_var | while read line; do - # echo $line >> ~/.bashrc - # done - # source prom_config_shared_settings_env_var - # fi fi fi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 5de4ec17b..b42ed0f9b 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -635,6 +635,9 @@ spec: # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" + # Add the below environment variable to true only in sidecar enabled regions, else set it to false + - name: SIDECAR_SCRAPING_ENABLED + value: "true" securityContext: privileged: true ports: diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 2e4bdaa07..d55617a6d 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -316,14 +316,14 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { Log("Custom prometheus monitor kubernetes pods namespace count string to int conversion error %s", err.Error()) PromMonitorPodsNamespaceLength = 0 } - promLabelSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR_LENGTH") + promLabelSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_LABEL_SELECTOR_LENGTH") PromMonitorPodsLabelSelectorLength, err = strconv.Atoi(promLabelSelectorLength) if err != nil { Log("Custom prometheus label selector count string to int conversion error %s", err.Error()) PromMonitorPodsLabelSelectorLength = 0 } - promFieldSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR_LENGTH") + promFieldSelectorLength := 
os.Getenv("TELEMETRY_CUSTOM_PROM_FIELD_SELECTOR_LENGTH") PromMonitorPodsFieldSelectorLength, err = strconv.Atoi(promFieldSelectorLength) if err != nil { Log("Custom prometheus field selector count string to int conversion error %s", err.Error()) diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 4af9a2d20..6106a3e83 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -17,6 +17,10 @@ class Kube_nodeInventory_Input < Input @@rsPromFieldDropCount = ENV["TELEMETRY_RS_PROM_FIELDDROP_LENGTH"] @@rsPromK8sServiceCount = ENV["TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH"] @@rsPromUrlCount = ENV["TELEMETRY_RS_PROM_URLS_LENGTH"] + @@rsPromMonitorPods = ENV["TELEMETRY_RS_PROM_MONITOR_PODS"] + @@rsPromMonitorPodsNamespaceLength = ENV["TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH"] + @@rsPromMonitorPodsLabelSelectorLength = ENV["TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH"] + @@rsPromMonitorPodsFieldSelectorLength = ENV["TELEMETRY_RS_PROM_FIELD_SELECTOR_LENGTH"] @@collectAllKubeEvents = ENV["AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS"] def initialize @@ -283,6 +287,10 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) properties["rsPromFDC"] = @@rsPromFieldDropCount properties["rsPromServ"] = @@rsPromK8sServiceCount properties["rsPromUrl"] = @@rsPromUrlCount + properties["rsPromMonPods"] = @@rsPromMonitorPods + properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength + properties["rsPromMonPodsLabelSelectorLength"] = @@rsPromMonitorPodsLabelSelectorLength + properties["rsPromMonPodsFieldSelectorLength"] = @@rsPromMonitorPodsFieldSelectorLength end ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties) telemetrySent = true From eea93e0bc6a994ea68e770dd700eec51c2817f0a Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 3 Mar 2021 18:20:06 -0800 Subject: [PATCH 137/175] some more --- 
.../scripts/tomlparser-prom-customconfig.rb | 2 +- kubernetes/linux/main.sh | 14 ++++++-------- kubernetes/omsagent.yaml | 2 ++ .../windows/setdefaulttelegrafenvvariables.ps1 | 2 ++ 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index e66c6a7ae..bf0f4d6c5 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -253,7 +253,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) end elsif @controller.casecmp(@daemonset) == 0 && ((!@containerType.nil? && @containerType.casecmp(@promSideCar) == 0) || - (!@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0)) && + (!@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0) && @sidecarScrapingEnabled.strip.casecmp("true") == 0) && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? #Get prometheus custom config settings for monitor kubernetes pods begin diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 9ac932609..610d55de2 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -327,15 +327,13 @@ cat config_metric_collection_env_var | while read line; do done source config_metric_collection_env_var -if [ ! -e "/etc/config/kube.conf" ]; then - if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb +if [ ! 
-e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb - cat integration_osm_config_env_var | while read line; do - echo $line >> ~/.bashrc - done - source integration_osm_config_env_var - fi + cat integration_osm_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source integration_osm_config_env_var fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index b42ed0f9b..398d4429d 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -796,6 +796,8 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + - name: SIDECAR_SCRAPING_ENABLED + value: "true" volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers diff --git a/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 index 272de23f1..2bfedc0a8 100644 --- a/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 +++ b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 @@ -2,6 +2,8 @@ [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", "1m", "machine") [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "process") [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE", "pod_scrape_scope = 'node'", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE", "pod_scrape_scope = 'node'", "machine") [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", "[]", "process") 
[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", "[]", "machine") [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", "[]", "process") From ef0a030167f34c0e887a978add0acc1e3450d6ea Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Mar 2021 12:25:48 -0800 Subject: [PATCH 138/175] some changes --- .../linux/installer/scripts/livenessprobe.sh | 5 +-- kubernetes/linux/main.sh | 35 +++++++++++-------- kubernetes/omsagent.yaml | 7 ++++ 3 files changed, 31 insertions(+), 16 deletions(-) diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 61b6310c3..3687040cb 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -33,8 +33,9 @@ then exit 1 fi -# Perform the following check only for prometheus sidecar that does OSM scraping -if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then +# Perform the following check only for prometheus sidecar that does OSM scraping or for replicaset when sidecar scraping is disabled +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( ( ! -z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then if [ -s "inotifyoutput-osm.txt" ] then # inotifyoutput-osm file has data(config map was applied) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 610d55de2..46d648c0e 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -39,7 +39,8 @@ sudo setfacl -m user:omsagent:rwx /var/opt/microsoft/docker-cimprov/log inotifywait /etc/config/settings --daemon --recursive --outfile "/opt/inotifyoutput.txt" --event create,delete --format '%e : %T' --timefmt '+%s' #Run inotify as a daemon to track changes to the mounted configmap for OSM settings. -if [ ! 
-e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( ( ! -z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then inotifywait /etc/config/osm-settings --daemon --recursive --outfile "/opt/inotifyoutput-osm.txt" --event create,delete --format '%e : %T' --timefmt '+%s' fi @@ -84,18 +85,21 @@ if [ -e "/etc/config/settings/config-version" ] && [ -s "/etc/config/settings/ fi #set OSM config schema version -if [ -e "/etc/config/osm-settings/schema-version" ] && [ -s "/etc/config/osm-settings/schema-version" ]; then - #trim - osm_config_schema_version="$(cat /etc/config/osm-settings/schema-version | xargs)" - #remove all spaces - osm_config_schema_version="${osm_config_schema_version//[[:space:]]/}" - #take first 10 characters - osm_config_schema_version="$(echo $osm_config_schema_version| cut -c1-10)" - - export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version - echo "export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version" >> ~/.bashrc - source ~/.bashrc - echo "AZMON_OSM_CFG_SCHEMA_VERSION:$AZMON_OSM_CFG_SCHEMA_VERSION" +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( ( ! 
-z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then + if [ -e "/etc/config/osm-settings/schema-version" ] && [ -s "/etc/config/osm-settings/schema-version" ]; then + #trim + osm_config_schema_version="$(cat /etc/config/osm-settings/schema-version | xargs)" + #remove all spaces + osm_config_schema_version="${osm_config_schema_version//[[:space:]]/}" + #take first 10 characters + osm_config_schema_version="$(echo $osm_config_schema_version| cut -c1-10)" + + export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version + echo "export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version" >> ~/.bashrc + source ~/.bashrc + echo "AZMON_OSM_CFG_SCHEMA_VERSION:$AZMON_OSM_CFG_SCHEMA_VERSION" + fi fi export PROXY_ENDPOINT="" @@ -327,7 +331,10 @@ cat config_metric_collection_env_var | while read line; do done source config_metric_collection_env_var -if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + +# OSM scraping to be done in replicaset if sidecar car scraping is disabled and always do the scraping from the sidecar (It will always be either one of the two) +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( ( ! 
-z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb cat integration_osm_config_env_var | while read line; do diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 398d4429d..39d535c12 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -666,6 +666,9 @@ spec: - mountPath: /etc/config/settings/adx name: omsagent-adx-secret readOnly: true + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config + readOnly: true livenessProbe: exec: command: @@ -737,6 +740,10 @@ spec: secret: secretName: omsagent-adx-secret optional: true + - name: osm-settings-vol-config + configMap: + name: container-azm-ms-osmconfig + optional: true --- apiVersion: apps/v1 kind: DaemonSet From 9707bcf4f3471ba2e2a0c2891a3ab7925edb36a2 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Mar 2021 12:38:49 -0800 Subject: [PATCH 139/175] changes --- .../scripts/tomlparser-prom-customconfig.rb | 4 ++-- .../linux/installer/scripts/livenessprobe.sh | 2 +- .../scripts/tomlparser-osm-config.rb | 12 +++++++++++ kubernetes/linux/main.sh | 20 +++++++++---------- kubernetes/omsagent.yaml | 2 +- source/plugins/go/src/telemetry.go | 4 ++-- 6 files changed, 28 insertions(+), 16 deletions(-) diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index bf0f4d6c5..efc14aed0 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -14,7 +14,7 @@ @promConfigMapMountPath = "/etc/config/settings/prometheus-data-collection-settings" @replicaset = "replicaset" @daemonset = "daemonset" -@promSideCar = "prometheus-sidecar" +@promSideCar = "prometheussidecar" @windows = "windows" @configSchemaVersion = "" @defaultDsInterval = "1m" @@ -198,7 +198,7 @@ def 
populateSettingValuesFromConfigMap(parsedConfig) # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - # - to use defaults in case of nil settings # Remove below block after phased rollout - if (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && @sidecarScrapingEnabled.casecmp("false") == 0)) + if ((!@sidecarScrapingEnabled.nil? && @sidecarScrapingEnabled.casecmp("false") == 0)) monitorKubernetesPodsNSConfig = [] if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? && checkForTypeArray(monitorKubernetesPodsNamespaces, String) # Adding a check to see if an empty array is passed for kubernetes namespaces diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 3687040cb..a82fa28eb 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -34,7 +34,7 @@ then fi # Perform the following check only for prometheus sidecar that does OSM scraping or for replicaset when sidecar scraping is disabled -if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ) ) || +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || ( ( -e "/etc/config/kube.conf" ) && ( ( ! 
-z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then if [ -s "inotifyoutput-osm.txt" ] then diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 0421b12ac..7fba6de46 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -4,6 +4,18 @@ require "fileutils" require_relative "ConfigParseErrorLogger" +@controllerType = ENV["CONTROLLER_TYPE"] +@containerType = ENV["CONTAINER_TYPE"] +@sidecarScrapingEnabled = ENV["SIDECAR_SCRAPING_ENABLED"] + +if !@controllerType.nil? && !@controllerType.empty? && @controllerType.strip.casecmp("replicaset") == 0 && + !@sidecarScrapingEnabled.nil? && !@sidecarScrapingEnabled.empty? && @sidecarScrapingEnabled.strip.casecmp("false") == 0 + require "tomlrb" +elsif !@containerType.nil? && !@containerType.empty? && @containerType.strip.casecmp("prometheussidecar") == 0 && + !@sidecarScrapingEnabled.nil? && !@sidecarScrapingEnabled.empty? 
&& @sidecarScrapingEnabled.strip.casecmp("false") == 0 + require_relative "tomlrb" +end + @configMapMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" @configSchemaVersion = "" @tgfConfigFileSidecar = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 46d648c0e..f6bbc6619 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -2,7 +2,7 @@ if [ -e "/etc/config/kube.conf" ]; then cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf -elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then +elif [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then echo "rashmi-in-ds-prom-omsagent-conf" cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf # omsadmin.sh replaces %MONITOR_AGENT_PORT% and %SYSLOG_PORT% in the monitor.conf and syslog.conf with default ports 25324 and 25224. @@ -39,7 +39,7 @@ sudo setfacl -m user:omsagent:rwx /var/opt/microsoft/docker-cimprov/log inotifywait /etc/config/settings --daemon --recursive --outfile "/opt/inotifyoutput.txt" --event create,delete --format '%e : %T' --timefmt '+%s' #Run inotify as a daemon to track changes to the mounted configmap for OSM settings. -if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ) ) || +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || ( ( -e "/etc/config/kube.conf" ) && ( ( ! -z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then inotifywait /etc/config/osm-settings --daemon --recursive --outfile "/opt/inotifyoutput-osm.txt" --event create,delete --format '%e : %T' --timefmt '+%s' fi @@ -85,7 +85,7 @@ if [ -e "/etc/config/settings/config-version" ] && [ -s "/etc/config/settings/ fi #set OSM config schema version -if [[ ( ( ! 
-e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ) ) || +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || ( ( -e "/etc/config/kube.conf" ) && ( ( ! -z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then if [ -e "/etc/config/osm-settings/schema-version" ] && [ -s "/etc/config/osm-settings/schema-version" ]; then #trim @@ -258,14 +258,14 @@ done source integration_npm_config_env_var #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset -if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "Prometheus-Sidecar" ]; then +if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb fi #Parse the OSM configmap to set the right environment variables for metric collection settings #This needs to be done before the prometheus custom config map parsing since we have namespace duplication logic in place. # if [ ! -e "/etc/config/kube.conf" ]; then -# if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then +# if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then # /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb # cat integration_osm_config_env_var | while read line; do @@ -288,7 +288,7 @@ fi #Setting default environment variables to be used in any case of failure in the above steps if [ ! -e "/etc/config/kube.conf" ]; then - if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then cat defaultpromenvvariables-sidecar | while read line; do echo $line >> ~/.bashrc done @@ -333,7 +333,7 @@ source config_metric_collection_env_var # OSM scraping to be done in replicaset if sidecar car scraping is disabled and always do the scraping from the sidecar (It will always be either one of the two) -if [[ ( ( ! 
-e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ) ) || +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || ( ( -e "/etc/config/kube.conf" ) && ( ( ! -z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb @@ -566,7 +566,7 @@ fi #start oneagent -if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "Prometheus-Sidecar" ]; then +if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then if [ ! -z $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE ]; then echo "container logs configmap route is $AZMON_CONTAINER_LOGS_ROUTE" echo "container logs effective route is $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" @@ -609,7 +609,7 @@ echo "************end oneagent log routing checks************" #If config parsing was successful, a copy of the conf file with replaced custom settings file is created if [ ! -e "/etc/config/kube.conf" ]; then - if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ] && [ -e "/opt/telegraf-test-prom-side-car.conf" ]; then + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ] && [ -e "/opt/telegraf-test-prom-side-car.conf" ]; then echo "****************Start Telegraf in Test Mode**************************" /opt/telegraf --config /opt/telegraf-test-prom-side-car.conf -test if [ $? -eq 0 ]; then @@ -639,7 +639,7 @@ fi #telegraf & fluentbit requirements if [ ! -e "/etc/config/kube.conf" ]; then - if [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then echo "in side car................" 
/opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 39d535c12..a3615e6fd 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -460,7 +460,7 @@ spec: #- name: ACS_RESOURCE_NAME # value: "my_acs_cluster_name" - name: CONTAINER_TYPE - value: "Prometheus-Sidecar" + value: "PrometheusSidecar" - name: CONTROLLER_TYPE value: "DaemonSet" - name: NODE_IP diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index d55617a6d..e35af6dc9 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -135,9 +135,9 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { ContainerLogTelemetryMutex.Unlock() if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { - if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheus-sidecar") == 0 { + if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheussidecar") == 0 { telemetryDimensions := make(map[string]string) - telemetryDimensions["ContainerType"] = "prometheus-sidecar" + telemetryDimensions["ContainerType"] = "prometheussidecar" telemetryDimensions["CustomPromMonitorPods"] = promMonitorPods if promMonitorPodsNamespaceLength > 0 { telemetryDimensions["CustomPromMonitorPodsNamespaceLength"] = strconv.Itoa(promMonitorPodsNamespaceLength) From 204806baea3cef741c73197785204899ebb17205 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Mar 2021 15:28:12 -0800 Subject: [PATCH 140/175] some more --- .../scripts/tomlparser-prom-customconfig.rb | 1 + .../scripts/tomlparser-osm-config.rb | 25 +- kubernetes/linux/main - Copy.sh | 623 ------------------ kubernetes/linux/main.sh | 21 - kubernetes/windows/main.ps1 | 9 +- 
kubernetes/windows/setup.ps1 | 34 +- 6 files changed, 43 insertions(+), 670 deletions(-) delete mode 100644 kubernetes/linux/main - Copy.sh diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index efc14aed0..4559bceaa 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -104,6 +104,7 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS") new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR") new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE") pluginConfigsWithNamespaces = "" monitorKubernetesPodsNamespaces.each do |namespace| diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 7fba6de46..5f58a934a 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -8,18 +8,23 @@ @containerType = ENV["CONTAINER_TYPE"] @sidecarScrapingEnabled = ENV["SIDECAR_SCRAPING_ENABLED"] -if !@controllerType.nil? && !@controllerType.empty? 
&& @controllerType.strip.casecmp("replicaset") == 0 && +@replicaset = "replicaset" +@prometheusSidecar = "prometheussidecar" + +if !@controllerType.nil? && !@controllerType.empty? && @controllerType.strip.casecmp(@replicaset) == 0 && !@sidecarScrapingEnabled.nil? && !@sidecarScrapingEnabled.empty? && @sidecarScrapingEnabled.strip.casecmp("false") == 0 - require "tomlrb" -elsif !@containerType.nil? && !@containerType.empty? && @containerType.strip.casecmp("prometheussidecar") == 0 && + @tgfConfigFile = "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + @tgfTestConfigFile = "/opt/telegraf-test-rs.conf" +elsif !@containerType.nil? && !@containerType.empty? && @containerType.strip.casecmp(@prometheusSidecar) == 0 && !@sidecarScrapingEnabled.nil? && !@sidecarScrapingEnabled.empty? && @sidecarScrapingEnabled.strip.casecmp("false") == 0 - require_relative "tomlrb" + @tgfConfigFile = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + @tgfTestConfigFile = "/opt/telegraf-test-prom-side-car.conf" end @configMapMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" @configSchemaVersion = "" -@tgfConfigFileSidecar = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" -@tgfTestConfigFile = "/opt/telegraf-test-prom-side-car.conf" +# @tgfConfigFileSidecar = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" +# @tgfTestConfigFile = "/opt/telegraf-test-prom-side-car.conf" @osmMetricNamespaces = [] #Configurations to be used for the auto-generated input prometheus plugins for namespace filtering @@ -102,7 +107,7 @@ def replaceOsmTelegrafConfigPlaceHolders name_prefix=\"container.azm.ms.osm/\" interval = \"#{@scrapeInterval}\" monitor_kubernetes_pods = true - monitor_kubernetes_pods_version = #{@monitorKubernetesPodsVersion} + pod_scrape_scope = #{(@controllerType.casecmp(@replicaset) == 0) ? 
"cluster" : "node"} monitor_kubernetes_pods_namespace = \"#{namespace}\" fieldpass = #{@fieldPassSetting} metric_version = #{@metricVersion} @@ -141,11 +146,11 @@ def replaceOsmTelegrafConfigPlaceHolders end # Check to see if the prometheus custom config parser has created a test config file so that we can replace the settings in the test file and run it, If not create -# a test config file by copying contents of the actual sidecar telegraf config file. +# a test config file by copying contents of the actual telegraf config file. if (!File.exist?(@tgfTestConfigFile)) # Copy the telegraf config file to a temp file to run telegraf in test mode with this config - puts "test telegraf sidecar config file #{@tgfTestConfigFile} does not exist, creating new one" - FileUtils.cp(@tgfConfigFileSidecar, @tgfTestConfigFile) + puts "test telegraf config file #{@tgfTestConfigFile} does not exist, creating new one" + FileUtils.cp(@tgfConfigFile, @tgfTestConfigFile) end replaceOsmTelegrafConfigPlaceHolders() diff --git a/kubernetes/linux/main - Copy.sh b/kubernetes/linux/main - Copy.sh deleted file mode 100644 index 0b5ad5d39..000000000 --- a/kubernetes/linux/main - Copy.sh +++ /dev/null @@ -1,623 +0,0 @@ -#!/bin/bash - -if [ -e "/etc/config/kube.conf" ]; then - echo "rashmi-in-rs-omsagent-conf" - cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf -elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - echo "rashmi-in-ds-prom-omsagent-conf" - cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf -else - echo "rashmi-in-ds-omsagent-conf" - sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf -fi -sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf -sed -i -e 's/^exit 101$/exit 0/g' /usr/sbin/policy-rc.d - -#Using the get_hostname for hostname instead of the host 
field in syslog messages -sed -i.bak "s/record\[\"Host\"\] = hostname/record\[\"Host\"\] = OMS::Common.get_hostname/" /opt/microsoft/omsagent/plugin/filter_syslog.rb - -#using /var/opt/microsoft/docker-cimprov/state instead of /var/opt/microsoft/omsagent/state since the latter gets deleted during onboarding -mkdir -p /var/opt/microsoft/docker-cimprov/state - -#if [ ! -e "/etc/config/kube.conf" ]; then - # add permissions for omsagent user to access docker.sock - #sudo setfacl -m user:omsagent:rw /var/run/host/docker.sock -#fi - -# add permissions for omsagent user to access azure.json. -sudo setfacl -m user:omsagent:r /etc/kubernetes/host/azure.json - -# add permission for omsagent user to log folder. We also need 'x', else log rotation is failing. TODO: Investigate why. -sudo setfacl -m user:omsagent:rwx /var/opt/microsoft/docker-cimprov/log - -#Run inotify as a daemon to track changes to the mounted configmap. -inotifywait /etc/config/settings --daemon --recursive --outfile "/opt/inotifyoutput.txt" --event create,delete --format '%e : %T' --timefmt '+%s' - -#resourceid override for loganalytics data. 
-if [ -z $AKS_RESOURCE_ID ]; then - echo "not setting customResourceId" -else - export customResourceId=$AKS_RESOURCE_ID - echo "export customResourceId=$AKS_RESOURCE_ID" >> ~/.bashrc - source ~/.bashrc - echo "customResourceId:$customResourceId" -fi - -#set agent config schema version -if [ -e "/etc/config/settings/schema-version" ] && [ -s "/etc/config/settings/schema-version" ]; then - #trim - config_schema_version="$(cat /etc/config/settings/schema-version | xargs)" - #remove all spaces - config_schema_version="${config_schema_version//[[:space:]]/}" - #take first 10 characters - config_schema_version="$(echo $config_schema_version| cut -c1-10)" - - export AZMON_AGENT_CFG_SCHEMA_VERSION=$config_schema_version - echo "export AZMON_AGENT_CFG_SCHEMA_VERSION=$config_schema_version" >> ~/.bashrc - source ~/.bashrc - echo "AZMON_AGENT_CFG_SCHEMA_VERSION:$AZMON_AGENT_CFG_SCHEMA_VERSION" -fi - -#set agent config file version -if [ -e "/etc/config/settings/config-version" ] && [ -s "/etc/config/settings/config-version" ]; then - #trim - config_file_version="$(cat /etc/config/settings/config-version | xargs)" - #remove all spaces - config_file_version="${config_file_version//[[:space:]]/}" - #take first 10 characters - config_file_version="$(echo $config_file_version| cut -c1-10)" - - export AZMON_AGENT_CFG_FILE_VERSION=$config_file_version - echo "export AZMON_AGENT_CFG_FILE_VERSION=$config_file_version" >> ~/.bashrc - source ~/.bashrc - echo "AZMON_AGENT_CFG_FILE_VERSION:$AZMON_AGENT_CFG_FILE_VERSION" -fi - -export PROXY_ENDPOINT="" - -# Check for internet connectivity or workspace deletion -if [ -e "/etc/omsagent-secret/WSID" ]; then - workspaceId=$(cat /etc/omsagent-secret/WSID) - if [ -e "/etc/omsagent-secret/DOMAIN" ]; then - domain=$(cat /etc/omsagent-secret/DOMAIN) - else - domain="opinsights.azure.com" - fi - - if [ -e "/etc/omsagent-secret/PROXY" ]; then - export PROXY_ENDPOINT=$(cat /etc/omsagent-secret/PROXY) - # Validate Proxy Endpoint URL - # extract the 
protocol:// - proto="$(echo $PROXY_ENDPOINT | grep :// | sed -e's,^\(.*://\).*,\1,g')" - # convert the protocol prefix in lowercase for validation - proxyprotocol=$(echo $proto | tr "[:upper:]" "[:lower:]") - if [ "$proxyprotocol" != "http://" -a "$proxyprotocol" != "https://" ]; then - echo "-e error proxy endpoint should be in this format http(s)://:@:" - fi - # remove the protocol - url="$(echo ${PROXY_ENDPOINT/$proto/})" - # extract the creds - creds="$(echo $url | grep @ | cut -d@ -f1)" - user="$(echo $creds | cut -d':' -f1)" - pwd="$(echo $creds | cut -d':' -f2)" - # extract the host and port - hostport="$(echo ${url/$creds@/} | cut -d/ -f1)" - # extract host without port - host="$(echo $hostport | sed -e 's,:.*,,g')" - # extract the port - port="$(echo $hostport | sed -e 's,^.*:,:,g' -e 's,.*:\([0-9]*\).*,\1,g' -e 's,[^0-9],,g')" - - if [ -z "$user" -o -z "$pwd" -o -z "$host" -o -z "$port" ]; then - echo "-e error proxy endpoint should be in this format http(s)://:@:" - else - echo "successfully validated provided proxy endpoint is valid and expected format" - fi - fi - - if [ ! -z "$PROXY_ENDPOINT" ]; then - echo "Making curl request to oms endpint with domain: $domain and proxy: $PROXY_ENDPOINT" - curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT - else - echo "Making curl request to oms endpint with domain: $domain" - curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest - fi - - if [ $? -ne 0 ]; then - if [ ! -z "$PROXY_ENDPOINT" ]; then - echo "Making curl request to ifconfig.co with proxy: $PROXY_ENDPOINT" - RET=`curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co --proxy $PROXY_ENDPOINT` - else - echo "Making curl request to ifconfig.co" - RET=`curl --max-time 10 -s -o /dev/null -w "%{http_code}" ifconfig.co` - fi - if [ $RET -eq 000 ]; then - echo "-e error Error resolving host during the onboarding request. 
Check the internet connectivity and/or network policy on the cluster" - else - # Retrying here to work around network timing issue - if [ ! -z "$PROXY_ENDPOINT" ]; then - echo "ifconfig check succeeded, retrying oms endpoint with proxy..." - curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest --proxy $PROXY_ENDPOINT - else - echo "ifconfig check succeeded, retrying oms endpoint..." - curl --max-time 10 https://$workspaceId.oms.$domain/AgentService.svc/LinuxAgentTopologyRequest - fi - - if [ $? -ne 0 ]; then - echo "-e error Error resolving host during the onboarding request. Workspace might be deleted." - else - echo "curl request to oms endpoint succeeded with retry." - fi - fi - else - echo "curl request to oms endpoint succeeded." - fi -else - echo "LA Onboarding:Workspace Id not mounted, skipping the telemetry check" -fi - -# Set environment variable for if public cloud by checking the workspace domain. -if [ -z $domain ]; then - ClOUD_ENVIRONMENT="unknown" -elif [ $domain == "opinsights.azure.com" ]; then - CLOUD_ENVIRONMENT="public" -else - CLOUD_ENVIRONMENT="national" -fi -export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT -echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc - -#Parse the configmap to set the right environment variables. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb - -cat config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source config_env_var - - -#Parse the configmap to set the right environment variables for agent config. -#Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb - -cat agent_config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source agent_config_env_var - -#Parse the configmap to set the right environment variables for network policy manager (npm) integration. 
-/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb - -cat integration_npm_config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source integration_npm_config_env_var - -#Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset -if [ ! -e "/etc/config/kube.conf" ]; then - /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb -fi - -#Parse the prometheus configmap to create a file with new custom settings. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb - -#If config parsing was successful, a copy of the conf file with replaced custom settings file is created -if [ ! -e "/etc/config/kube.conf" ]; then - if [ -e "/opt/telegraf-test.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test.conf -test - if [ $? -eq 0 ]; then - mv "/opt/telegraf-test.conf" "/etc/opt/microsoft/docker-cimprov/telegraf.conf" - fi - echo "****************End Telegraf Run in Test Mode**************************" - fi -else - if [ -e "/opt/telegraf-test-rs.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test-rs.conf -test - if [ $? -eq 0 ]; then - mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" - fi - echo "****************End Telegraf Run in Test Mode**************************" - fi -fi - -#Setting default environment variables to be used in any case of failure in the above steps -if [ ! 
-e "/etc/config/kube.conf" ]; then - if [ -z "${CONTAINER_TYPE}" ]; then - cat defaultpromenvvariables | while read line; do - echo $line >> ~/.bashrc - done - source defaultpromenvvariables - elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - cat defaultpromenvvariables-rs | while read line; do - echo $line >> ~/.bashrc - done - source defaultpromenvvariables-rs - fi -fi - -#Sourcing telemetry environment variable file if it exists -if [ -e "telemetry_prom_config_env_var" ]; then - cat telemetry_prom_config_env_var | while read line; do - echo $line >> ~/.bashrc - done - source telemetry_prom_config_env_var -fi - -#Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb - -cat config_mdm_metrics_env_var | while read line; do - echo $line >> ~/.bashrc -done -source config_mdm_metrics_env_var - -#Parse the configmap to set the right environment variables for metric collection settings -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb - -cat config_metric_collection_env_var | while read line; do - echo $line >> ~/.bashrc -done -source config_metric_collection_env_var - -#Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request -echo "Making wget request to cadvisor endpoint with port 10250" -#Defaults to use port 10255 -cAdvisorIsSecure=false -RET_CODE=`wget --server-response https://$NODE_IP:10250/stats/summary --no-check-certificate --header="Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" 2>&1 | awk '/^ HTTP/{print $2}'` -if [ $RET_CODE -eq 200 ]; then - cAdvisorIsSecure=true -fi - -# default to docker since this is default in AKS as of now and change to containerd once this becomes default in AKS -export CONTAINER_RUNTIME="docker" -export NODE_NAME="" - -if [ "$cAdvisorIsSecure" = true ]; then - echo "Wget request using port 10250 
succeeded. Using 10250" - export IS_SECURE_CADVISOR_PORT=true - echo "export IS_SECURE_CADVISOR_PORT=true" >> ~/.bashrc - export CADVISOR_METRICS_URL="https://$NODE_IP:10250/metrics" - echo "export CADVISOR_METRICS_URL=https://$NODE_IP:10250/metrics" >> ~/.bashrc - echo "Making curl request to cadvisor endpoint /pods with port 10250 to get the configured container runtime on kubelet" - podWithValidContainerId=$(curl -s -k -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://$NODE_IP:10250/pods | jq -R 'fromjson? | [ .items[] | select( any(.status.phase; contains("Running")) ) ] | .[0]') -else - echo "Wget request using port 10250 failed. Using port 10255" - export IS_SECURE_CADVISOR_PORT=false - echo "export IS_SECURE_CADVISOR_PORT=false" >> ~/.bashrc - export CADVISOR_METRICS_URL="http://$NODE_IP:10255/metrics" - echo "export CADVISOR_METRICS_URL=http://$NODE_IP:10255/metrics" >> ~/.bashrc - echo "Making curl request to cadvisor endpoint with port 10255 to get the configured container runtime on kubelet" - podWithValidContainerId=$(curl -s http://$NODE_IP:10255/pods | jq -R 'fromjson? | [ .items[] | select( any(.status.phase; contains("Running")) ) ] | .[0]') -fi - -if [ ! 
-z "$podWithValidContainerId" ]; then - containerRuntime=$(echo $podWithValidContainerId | jq -r '.status.containerStatuses[0].containerID' | cut -d ':' -f 1) - nodeName=$(echo $podWithValidContainerId | jq -r '.spec.nodeName') - # convert to lower case so that everywhere else can be used in lowercase - containerRuntime=$(echo $containerRuntime | tr "[:upper:]" "[:lower:]") - nodeName=$(echo $nodeName | tr "[:upper:]" "[:lower:]") - # update runtime only if its not empty, not null and not startswith docker - if [ -z "$containerRuntime" -o "$containerRuntime" == null ]; then - echo "using default container runtime as $CONTAINER_RUNTIME since got containeRuntime as empty or null" - elif [[ $containerRuntime != docker* ]]; then - export CONTAINER_RUNTIME=$containerRuntime - fi - - if [ -z "$nodeName" -o "$nodeName" == null ]; then - echo "-e error nodeName in /pods API response is empty" - else - export NODE_NAME=$nodeName - fi -else - echo "-e error either /pods API request failed or no running pods" -fi - -echo "configured container runtime on kubelet is : "$CONTAINER_RUNTIME -echo "export CONTAINER_RUNTIME="$CONTAINER_RUNTIME >> ~/.bashrc - -export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="kubelet_runtime_operations_total" -echo "export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC >> ~/.bashrc -export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="kubelet_runtime_operations_errors_total" -echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC >> ~/.bashrc - -# default to docker metrics -export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_docker_operations" -export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_docker_operations_errors" - -if [ "$CONTAINER_RUNTIME" != "docker" ]; then - # these metrics are avialble only on k8s versions <1.18 and will get deprecated from 1.18 - export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_runtime_operations" - export 
KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="kubelet_runtime_operations_errors" -else - #if container run time is docker then add omsagent user to local docker group to get access to docker.sock - # docker.sock only use for the telemetry to get the docker version - DOCKER_SOCKET=/var/run/host/docker.sock - DOCKER_GROUP=docker - REGULAR_USER=omsagent - if [ -S ${DOCKER_SOCKET} ]; then - echo "getting gid for docker.sock" - DOCKER_GID=$(stat -c '%g' ${DOCKER_SOCKET}) - echo "creating a local docker group" - groupadd -for -g ${DOCKER_GID} ${DOCKER_GROUP} - echo "adding omsagent user to local docker group" - usermod -aG ${DOCKER_GROUP} ${REGULAR_USER} - fi -fi - -echo "set caps for ruby process to read container env from proc" -sudo setcap cap_sys_ptrace,cap_dac_read_search+ep /opt/microsoft/omsagent/ruby/bin/ruby - -echo "export KUBELET_RUNTIME_OPERATIONS_METRIC="$KUBELET_RUNTIME_OPERATIONS_METRIC >> ~/.bashrc -echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC >> ~/.bashrc - -source ~/.bashrc - -echo $NODE_NAME > /var/opt/microsoft/docker-cimprov/state/containerhostname -#check if file was written successfully. -cat /var/opt/microsoft/docker-cimprov/state/containerhostname - - -#Commenting it for test. 
We do this in the installer now -#Setup sudo permission for containerlogtailfilereader -#chmod +w /etc/sudoers.d/omsagent -#echo "#run containerlogtailfilereader.rb for docker-provider" >> /etc/sudoers.d/omsagent -#echo "omsagent ALL=(ALL) NOPASSWD: /opt/microsoft/omsagent/ruby/bin/ruby /opt/microsoft/omsagent/plugin/containerlogtailfilereader.rb *" >> /etc/sudoers.d/omsagent -#chmod 440 /etc/sudoers.d/omsagent - -#Disable dsc -#/opt/microsoft/omsconfig/Scripts/OMS_MetaConfigHelper.py --disable -rm -f /etc/opt/microsoft/omsagent/conf/omsagent.d/omsconfig.consistencyinvoker.conf - -CIWORKSPACE_id="" -CIWORKSPACE_key="" - -if [ -z $INT ]; then - if [ -a /etc/omsagent-secret/PROXY ]; then - if [ -a /etc/omsagent-secret/DOMAIN ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` -d `cat /etc/omsagent-secret/DOMAIN` -p `cat /etc/omsagent-secret/PROXY` - else - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` -p `cat /etc/omsagent-secret/PROXY` - fi - CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" - CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" - elif [ -a /etc/omsagent-secret/DOMAIN ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` -d `cat /etc/omsagent-secret/DOMAIN` - CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" - CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" - elif [ -a /etc/omsagent-secret/WSID ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /etc/omsagent-secret/WSID` -s `cat /etc/omsagent-secret/KEY` - CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" - CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" - elif [ -a /run/secrets/DOMAIN ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /run/secrets/WSID` -s `cat /run/secrets/KEY` -d `cat /run/secrets/DOMAIN` - CIWORKSPACE_id="$(cat /run/secrets/WSID)" - CIWORKSPACE_key="$(cat 
/run/secrets/KEY)" - elif [ -a /run/secrets/WSID ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w `cat /run/secrets/WSID` -s `cat /run/secrets/KEY` - CIWORKSPACE_id="$(cat /run/secrets/WSID)" - CIWORKSPACE_key="$(cat /run/secrets/KEY)" - elif [ -z $DOMAIN ]; then - /opt/microsoft/omsagent/bin/omsadmin.sh -w $WSID -s $KEY - CIWORKSPACE_id="$(cat /etc/omsagent-secret/WSID)" - CIWORKSPACE_key="$(cat /etc/omsagent-secret/KEY)" - else - /opt/microsoft/omsagent/bin/omsadmin.sh -w $WSID -s $KEY -d $DOMAIN - CIWORKSPACE_id="$WSID" - CIWORKSPACE_key="$KEY" - fi -else -#To onboard to INT workspace - workspace-id (WSID-not base64 encoded), workspace-key (KEY-not base64 encoded), Domain(DOMAIN-int2.microsoftatlanta-int.com) -#need to be added to omsagent.yaml. - echo WORKSPACE_ID=$WSID > /etc/omsagent-onboard.conf - echo SHARED_KEY=$KEY >> /etc/omsagent-onboard.conf - echo URL_TLD=$DOMAIN >> /etc/omsagent-onboard.conf - /opt/microsoft/omsagent/bin/omsadmin.sh - CIWORKSPACE_id="$WSID" - CIWORKSPACE_key="$KEY" -fi - -#start cron daemon for logrotate -service cron start - -#check if agent onboarded successfully -/opt/microsoft/omsagent/bin/omsadmin.sh -l - -#get omsagent and docker-provider versions -dpkg -l | grep omsagent | awk '{print $2 " " $3}' -dpkg -l | grep docker-cimprov | awk '{print $2 " " $3}' - -DOCKER_CIMPROV_VERSION=$(dpkg -l | grep docker-cimprov | awk '{print $3}') -echo "DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" -export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION -echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc - -#region check to auto-activate oneagent, to route container logs, -#Intent is to activate one agent routing for all managed clusters with region in the regionllist, unless overridden by configmap -# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map -# AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE will have the final route that we compute & set, based on our region list logic -echo 
"************start oneagent log routing checks************" -# by default, use configmap route for safer side -AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE - -#trim region list -oneagentregions="$(echo $AZMON_CONTAINERLOGS_ONEAGENT_REGIONS | xargs)" -#lowercase region list -typeset -l oneagentregions=$oneagentregions -echo "oneagent regions: $oneagentregions" -#trim current region -currentregion="$(echo $AKS_REGION | xargs)" -#lowercase current region -typeset -l currentregion=$currentregion -echo "current region: $currentregion" - -#initilze isoneagentregion as false -isoneagentregion=false - -#set isoneagentregion as true if matching region is found -if [ ! -z $oneagentregions ] && [ ! -z $currentregion ]; then - for rgn in $(echo $oneagentregions | sed "s/,/ /g"); do - if [ "$rgn" == "$currentregion" ]; then - isoneagentregion=true - echo "current region is in oneagent regions..." - break - fi - done -else - echo "current region is not in oneagent regions..." -fi - -if [ "$isoneagentregion" = true ]; then - #if configmap has a routing for logs, but current region is in the oneagent region list, take the configmap route - if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then - AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE - echo "oneagent region is true for current region:$currentregion and config map logs route is not empty. so using config map logs route as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" - else #there is no configmap route, so route thru oneagent - AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE="v2" - echo "oneagent region is true for current region:$currentregion and config map logs route is empty. so using oneagent as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" - fi -else - echo "oneagent region is false for current region:$currentregion" -fi - - -#start oneagent -if [ ! -e "/etc/config/kube.conf" ]; then - if [ ! 
-z $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE ]; then - echo "container logs configmap route is $AZMON_CONTAINER_LOGS_ROUTE" - echo "container logs effective route is $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" - #trim - containerlogsroute="$(echo $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE | xargs)" - # convert to lowercase - typeset -l containerlogsroute=$containerlogsroute - - echo "setting AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE as :$containerlogsroute" - export AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$containerlogsroute - echo "export AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$containerlogsroute" >> ~/.bashrc - source ~/.bashrc - - if [ "$containerlogsroute" == "v2" ]; then - echo "activating oneagent..." - echo "configuring mdsd..." - cat /etc/mdsd.d/envmdsd | while read line; do - echo $line >> ~/.bashrc - done - source /etc/mdsd.d/envmdsd - - echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" - export CIWORKSPACE_id=$CIWORKSPACE_id - echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc - export CIWORKSPACE_key=$CIWORKSPACE_key - echo "export CIWORKSPACE_key=$CIWORKSPACE_key" >> ~/.bashrc - - source ~/.bashrc - - dpkg -l | grep mdsd | awk '{print $2 " " $3}' - - echo "starting mdsd ..." - mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & - - touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 - fi - fi -fi -echo "************end oneagent log routing checks************" - -#telegraf & fluentbit requirements -if [ ! 
-e "/etc/config/kube.conf" ]; then - if [ -z "${CONTAINER_TYPE}" ]; then - if [ "$CONTAINER_RUNTIME" == "docker" ]; then - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" - else - echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" - sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" - fi - elif [ "${CONTAINER_TYPE}" == "Prometheus-Sidecar" ]; then - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" - fi -# else -# /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & -# telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" -fi - -#set env vars used by telegraf -if [ -z $AKS_RESOURCE_ID ]; then - telemetry_aks_resource_id="" - telemetry_aks_region="" - telemetry_cluster_name="" - telemetry_acs_resource_name=$ACS_RESOURCE_NAME - telemetry_cluster_type="ACS" -else - telemetry_aks_resource_id=$AKS_RESOURCE_ID - telemetry_aks_region=$AKS_REGION - telemetry_cluster_name=$AKS_RESOURCE_ID - telemetry_acs_resource_name="" - telemetry_cluster_type="AKS" -fi - -export TELEMETRY_AKS_RESOURCE_ID=$telemetry_aks_resource_id -echo "export TELEMETRY_AKS_RESOURCE_ID=$telemetry_aks_resource_id" >> ~/.bashrc -export TELEMETRY_AKS_REGION=$telemetry_aks_region -echo "export TELEMETRY_AKS_REGION=$telemetry_aks_region" >> ~/.bashrc -export TELEMETRY_CLUSTER_NAME=$telemetry_cluster_name -echo "export 
TELEMETRY_CLUSTER_NAME=$telemetry_cluster_name" >> ~/.bashrc -export TELEMETRY_ACS_RESOURCE_NAME=$telemetry_acs_resource_name -echo "export TELEMETRY_ACS_RESOURCE_NAME=$telemetry_acs_resource_name" >> ~/.bashrc -export TELEMETRY_CLUSTER_TYPE=$telemetry_cluster_type -echo "export TELEMETRY_CLUSTER_TYPE=$telemetry_cluster_type" >> ~/.bashrc - -#if [ ! -e "/etc/config/kube.conf" ]; then -# nodename=$(cat /hostfs/etc/hostname) -#else -nodename=$(cat /var/opt/microsoft/docker-cimprov/state/containerhostname) -#fi -echo "nodename: $nodename" -echo "replacing nodename in telegraf config" -sed -i -e "s/placeholder_hostname/$nodename/g" $telegrafConfFile - -export HOST_MOUNT_PREFIX=/hostfs -echo "export HOST_MOUNT_PREFIX=/hostfs" >> ~/.bashrc -export HOST_PROC=/hostfs/proc -echo "export HOST_PROC=/hostfs/proc" >> ~/.bashrc -export HOST_SYS=/hostfs/sys -echo "export HOST_SYS=/hostfs/sys" >> ~/.bashrc -export HOST_ETC=/hostfs/etc -echo "export HOST_ETC=/hostfs/etc" >> ~/.bashrc -export HOST_VAR=/hostfs/var -echo "export HOST_VAR=/hostfs/var" >> ~/.bashrc - -aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) -export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey -echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc - -source ~/.bashrc - -#start telegraf -/opt/telegraf --config $telegrafConfFile & -/opt/telegraf --version -dpkg -l | grep td-agent-bit | awk '{print $2 " " $3}' - -#dpkg -l | grep telegraf | awk '{print $2 " " $3}' - - - -echo "stopping rsyslog..." -service rsyslog stop - -echo "getting rsyslog status..." -service rsyslog status - -shutdown() { - /opt/microsoft/omsagent/bin/service_control stop - } - -trap "shutdown" SIGTERM - -sleep inf & wait diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index f6bbc6619..28cd8b270 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -262,27 +262,6 @@ if [ ! 
-e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidec /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb fi -#Parse the OSM configmap to set the right environment variables for metric collection settings -#This needs to be done before the prometheus custom config map parsing since we have namespace duplication logic in place. -# if [ ! -e "/etc/config/kube.conf" ]; then -# if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then -# /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb - -# cat integration_osm_config_env_var | while read line; do -# echo $line >> ~/.bashrc -# done -# source integration_osm_config_env_var - -# #Sourcing prometheus side car config settings if it exists -# # if [ -e "prom_config_shared_settings_env_var" ]; then -# # cat prom_config_shared_settings_env_var | while read line; do -# # echo $line >> ~/.bashrc -# # done -# # source prom_config_shared_settings_env_var -# # fi -# fi -# fi - #Parse the prometheus configmap to create a file with new custom settings. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 96a5d9bac..cfc793262 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -402,9 +402,16 @@ Start-FileSystemWatcher # Bootstrap-CACertificates # } + Generate-Certificates Test-CertificatePath -Start-Telegraf +# Start telegraf only in sidecar scraping mode +$sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') +if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') +{ + Start-Telegraf +} + Start-Fluent # List all powershell processes running. 
This should have main.ps1 and filesystemwatcher.ps1 diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 337e81195..b5e194c33 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -34,22 +34,26 @@ Write-Host ('Installing Fluent Bit'); } Write-Host ('Finished Installing Fluentbit') -Write-Host ('Installing Telegraf'); - - try { - $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-win/telegraf-win-debug.zip' - Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip - Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win - Move-Item -Path /installation/telegraf-win -Destination /opt/telegraf/ -ErrorAction SilentlyContinue - } - catch { - $ex = $_.Exception - Write-Host "exception while downloading telegraf for windows" - Write-Host $ex - exit 1 - } -Write-Host ('Finished downloading Telegraf') +# Start telegraf only in sidecar scraping mode +$sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') +if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') +{ + Write-Host ('Installing Telegraf'); + try { + $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-win/telegraf-win-debug.zip' + Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip + Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win + Move-Item -Path /installation/telegraf-win -Destination /opt/telegraf/ -ErrorAction SilentlyContinue + } + catch { + $ex = $_.Exception + Write-Host "exception while downloading telegraf for windows" + Write-Host $ex + exit 1 + } + Write-Host ('Finished downloading Telegraf') +} Write-Host ('Installing Visual C++ Redistributable Package') $vcRedistLocation = 'https://aka.ms/vs/16/release/vc_redist.x64.exe' From 
c8a16cc436ece4da22d117668fcf0b1a985c12f1 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 4 Mar 2021 18:23:56 -0800 Subject: [PATCH 141/175] more changes --- .../installer/scripts/tomlparser-prom-customconfig.rb | 9 ++++----- build/linux/installer/scripts/tomlparser-osm-config.rb | 3 +-- kubernetes/linux/main.sh | 6 +++--- kubernetes/omsagent.yaml | 3 +++ source/plugins/ruby/in_kube_nodes.rb | 2 ++ 5 files changed, 13 insertions(+), 10 deletions(-) diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 4559bceaa..2ae3a513b 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -85,7 +85,7 @@ def checkForType(variable, varType) def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) begin - puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with no namespace filters" + puts "config::Starting to substitute the placeholders in telegraf conf copy file with no namespace filters" new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE", ("pod_scrape_scope = #{(@controller.casecmp(@replicaset) == 0) ? 
"cluster" : "node"}")) new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") @@ -99,7 +99,7 @@ def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubern def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) begin - puts "config::Starting to substitute the placeholders in telegraf conf copy file for prometheus side car with namespace filters" + puts "config::Starting to substitute the placeholders in telegraf conf copy file with namespace filters" new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS") new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR") @@ -140,7 +140,6 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu # Use the ruby structure created after config parsing to set the right values to be used as environment variables def populateSettingValuesFromConfigMap(parsedConfig) - # containerOs = ENV["CONTAINER_OS"] if !@controller.nil? if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? if @controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
@@ -158,7 +157,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) kubernetesLabelSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_label_selector] kubernetesFieldSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_field_selector] - # Check for the right datattypes to enforce right setting values + # Check for the right datatypes to enforce right setting values if checkForType(interval, String) && checkForTypeArray(fieldPass, String) && checkForTypeArray(fieldDrop, String) && @@ -233,7 +232,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") # Remove below block after phased rollout - if (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && @sidecarScrapingEnabled.casecmp("false") == 0)) + if (!@sidecarScrapingEnabled.nil? && @sidecarScrapingEnabled.casecmp("false") == 0) file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") file.write("export TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 5f58a934a..d0ab15853 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -15,8 +15,7 @@ !@sidecarScrapingEnabled.nil? && !@sidecarScrapingEnabled.empty? && @sidecarScrapingEnabled.strip.casecmp("false") == 0 @tgfConfigFile = "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" @tgfTestConfigFile = "/opt/telegraf-test-rs.conf" -elsif !@containerType.nil? && !@containerType.empty? 
&& @containerType.strip.casecmp(@prometheusSidecar) == 0 && - !@sidecarScrapingEnabled.nil? && !@sidecarScrapingEnabled.empty? && @sidecarScrapingEnabled.strip.casecmp("false") == 0 +elsif !@containerType.nil? && !@containerType.empty? && @containerType.strip.casecmp(@prometheusSidecar) == 0 @tgfConfigFile = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" @tgfTestConfigFile = "/opt/telegraf-test-prom-side-car.conf" end diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 28cd8b270..7d9406ab3 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -40,7 +40,7 @@ inotifywait /etc/config/settings --daemon --recursive --outfile "/opt/inotifyout #Run inotify as a daemon to track changes to the mounted configmap for OSM settings. if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || - ( ( -e "/etc/config/kube.conf" ) && ( ( ! -z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then + ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then inotifywait /etc/config/osm-settings --daemon --recursive --outfile "/opt/inotifyoutput-osm.txt" --event create,delete --format '%e : %T' --timefmt '+%s' fi @@ -86,7 +86,7 @@ fi #set OSM config schema version if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || - ( ( -e "/etc/config/kube.conf" ) && ( ( ! 
-z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then + ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then if [ -e "/etc/config/osm-settings/schema-version" ] && [ -s "/etc/config/osm-settings/schema-version" ]; then #trim osm_config_schema_version="$(cat /etc/config/osm-settings/schema-version | xargs)" @@ -313,7 +313,7 @@ source config_metric_collection_env_var # OSM scraping to be done in replicaset if sidecar car scraping is disabled and always do the scraping from the sidecar (It will always be either one of the two) if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || - ( ( -e "/etc/config/kube.conf" ) && ( ( ! -z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then + ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb cat integration_osm_config_env_var | while read line; do diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index a3615e6fd..25c5620a4 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -440,6 +440,7 @@ spec: - /opt/livenessprobe.sh initialDelaySeconds: 60 periodSeconds: 60 +#Only in sidecar scraping mode - name: omsagent-prometheus image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" imagePullPolicy: IfNotPresent @@ -666,6 +667,7 @@ spec: - mountPath: /etc/config/settings/adx name: omsagent-adx-secret readOnly: true +# only in sidecar scraping mode - mountPath: /etc/config/osm-settings name: osm-settings-vol-config readOnly: true @@ -740,6 +742,7 @@ spec: secret: secretName: omsagent-adx-secret optional: true +# only in sidecar scraping mode - name: osm-settings-vol-config configMap: name: container-azm-ms-osmconfig diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 
6106a3e83..be870ab5e 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -22,6 +22,7 @@ class Kube_nodeInventory_Input < Input @@rsPromMonitorPodsLabelSelectorLength = ENV["TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH"] @@rsPromMonitorPodsFieldSelectorLength = ENV["TELEMETRY_RS_PROM_FIELD_SELECTOR_LENGTH"] @@collectAllKubeEvents = ENV["AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS"] + @@osmNamespaceCount = ENV["TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT"] def initialize super @@ -291,6 +292,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength properties["rsPromMonPodsLabelSelectorLength"] = @@rsPromMonitorPodsLabelSelectorLength properties["rsPromMonPodsFieldSelectorLength"] = @@rsPromMonitorPodsFieldSelectorLength + properties["osmNamespaceCount"] == @@osmNamespaceCount end ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties) telemetrySent = true From 86b7bd3d6cc7f34d7ccd21859d46144e6cca6428 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 12:47:42 -0800 Subject: [PATCH 142/175] changes for backcompat --- .../common/installer/scripts/tomlparser-prom-customconfig.rb | 4 ++-- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 2ae3a513b..7aac8a5c1 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -198,7 +198,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - # - to use defaults in case of nil settings # Remove below block 
after phased rollout - if ((!@sidecarScrapingEnabled.nil? && @sidecarScrapingEnabled.casecmp("false") == 0)) + if (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && (@sidecarScrapingEnabled.casecmp("false") == 0))) monitorKubernetesPodsNSConfig = [] if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? && checkForTypeArray(monitorKubernetesPodsNamespaces, String) # Adding a check to see if an empty array is passed for kubernetes namespaces @@ -232,7 +232,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") # Remove below block after phased rollout - if (!@sidecarScrapingEnabled.nil? && @sidecarScrapingEnabled.casecmp("false") == 0) + if (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && (@sidecarScrapingEnabled.casecmp("false") == 0))) file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") file.write("export TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index d0ab15853..2939f12e9 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -12,7 +12,7 @@ @prometheusSidecar = "prometheussidecar" if !@controllerType.nil? && !@controllerType.empty? && @controllerType.strip.casecmp(@replicaset) == 0 && - !@sidecarScrapingEnabled.nil? && !@sidecarScrapingEnabled.empty? && @sidecarScrapingEnabled.strip.casecmp("false") == 0 + (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && !@sidecarScrapingEnabled.empty? 
&& @sidecarScrapingEnabled.strip.casecmp("false") == 0)) @tgfConfigFile = "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" @tgfTestConfigFile = "/opt/telegraf-test-rs.conf" elsif !@containerType.nil? && !@containerType.empty? && @containerType.strip.casecmp(@prometheusSidecar) == 0 From 12a8f5c1ada29598637230ebc827de1cba0dc02e Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 13:14:18 -0800 Subject: [PATCH 143/175] changes --- .../common/installer/scripts/tomlparser-prom-customconfig.rb | 4 ++-- build/linux/installer/conf/telegraf-prom-side-car.conf | 2 +- build/linux/installer/conf/telegraf-rs.conf | 3 ++- build/windows/installer/conf/telegraf.conf | 2 +- kubernetes/linux/defaultpromenvvariables-rs | 2 +- kubernetes/linux/defaultpromenvvariables-sidecar | 2 +- kubernetes/windows/setdefaulttelegrafenvvariables.ps1 | 4 ++-- 7 files changed, 10 insertions(+), 9 deletions(-) diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 7aac8a5c1..32f342b67 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -87,7 +87,7 @@ def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubern begin puts "config::Starting to substitute the placeholders in telegraf conf copy file with no namespace filters" new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) - new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE", ("pod_scrape_scope = #{(@controller.casecmp(@replicaset) == 0) ? "cluster" : "node"}")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", ("pod_scrape_scope = #{(@controller.casecmp(@replicaset) == 0) ? 
"cluster" : "node"}")) new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = \"#{kubernetesLabelSelectors}\"")) new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = \"#{kubernetesFieldSelectors}\"")) @@ -104,7 +104,7 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS") new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR") new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR") - new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE") pluginConfigsWithNamespaces = "" monitorKubernetesPodsNamespaces.each do |namespace| diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 862c932a1..dba2418ca 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -742,7 +742,7 @@ ## - prometheus.io/path: If the 
metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS - $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE + $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index 4d6079145..ee1cf8819 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -555,7 +555,8 @@ ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS - $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE + + $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR diff --git a/build/windows/installer/conf/telegraf.conf b/build/windows/installer/conf/telegraf.conf index 7bdfd0a9a..233600251 100644 --- a/build/windows/installer/conf/telegraf.conf +++ b/build/windows/installer/conf/telegraf.conf @@ -736,7 +736,7 @@ ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
## - prometheus.io/port: If port is not 9102 use this annotation $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS - $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE + $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR diff --git a/kubernetes/linux/defaultpromenvvariables-rs b/kubernetes/linux/defaultpromenvvariables-rs index 9bf570e3a..920f4e90e 100644 --- a/kubernetes/linux/defaultpromenvvariables-rs +++ b/kubernetes/linux/defaultpromenvvariables-rs @@ -1,6 +1,6 @@ export AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL="1m" export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" -export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE="pod_scrape_scope = 'cluster'" +export AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE="pod_scrape_scope = 'cluster'" export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" export AZMON_TELEGRAF_CUSTOM_PROM_URLS="[]" diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar index 84a0c56aa..3301488d8 100644 --- a/kubernetes/linux/defaultpromenvvariables-sidecar +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -1,6 +1,6 @@ export AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL="1m" export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" -export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE="pod_scrape_scope = 'node'" +export AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE="pod_scrape_scope = 'node'" export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" export AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" diff --git a/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 index 2bfedc0a8..269894139 100644 --- a/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 +++ b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 @@ 
-2,8 +2,8 @@ [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", "1m", "machine") [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "process") [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "machine") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE", "pod_scrape_scope = 'node'", "process") -[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS_SCOPE", "pod_scrape_scope = 'node'", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", "pod_scrape_scope = 'node'", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", "pod_scrape_scope = 'node'", "machine") [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", "[]", "process") [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", "[]", "machine") [System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", "[]", "process") From 246460dfc2d0905f0e798552d9eaff1ba10752c1 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 13:27:33 -0800 Subject: [PATCH 144/175] new telegraf build --- kubernetes/linux/setup.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 5fd5414f0..056a147a6 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -61,7 +61,7 @@ sudo apt-get install libcap2-bin -y #service telegraf stop #wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf -wget https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-3/telegraf +wget 
https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf chmod 777 /opt/telegraf From a46ed9a95396a8c4c3f9240eefe15dd9e1a3f102 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 13:40:13 -0800 Subject: [PATCH 145/175] bug fixes --- .../common/installer/scripts/tomlparser-prom-customconfig.rb | 4 ++-- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 32f342b67..2e112f36c 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -87,7 +87,7 @@ def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubern begin puts "config::Starting to substitute the placeholders in telegraf conf copy file with no namespace filters" new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) - new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", ("pod_scrape_scope = #{(@controller.casecmp(@replicaset) == 0) ? "cluster" : "node"}")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", ("pod_scrape_scope = \"#{(@controller.casecmp(@replicaset) == 0) ? 
"cluster" : "node"}\"")) new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = \"#{kubernetesLabelSelectors}\"")) new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = \"#{kubernetesFieldSelectors}\"")) @@ -115,7 +115,7 @@ def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKu pluginConfigsWithNamespaces += "\n[[inputs.prometheus]] interval = \"#{interval}\" monitor_kubernetes_pods = true - pod_scrape_scope = #{(@controller.casecmp(@replicaset) == 0) ? "cluster" : "node"} + pod_scrape_scope = \"#{(@controller.casecmp(@replicaset) == 0) ? "cluster" : "node"}\" monitor_kubernetes_pods_namespace = \"#{namespace}\" kubernetes_label_selector = \"#{kubernetesLabelSelectors}\" kubernetes_field_selector = \"#{kubernetesFieldSelectors}\" diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index 2939f12e9..a195bf8ae 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -106,7 +106,7 @@ def replaceOsmTelegrafConfigPlaceHolders name_prefix=\"container.azm.ms.osm/\" interval = \"#{@scrapeInterval}\" monitor_kubernetes_pods = true - pod_scrape_scope = #{(@controllerType.casecmp(@replicaset) == 0) ? "cluster" : "node"} + pod_scrape_scope = \"#{(@controllerType.casecmp(@replicaset) == 0) ? 
"cluster" : "node"}\" monitor_kubernetes_pods_namespace = \"#{namespace}\" fieldpass = #{@fieldPassSetting} metric_version = #{@metricVersion} From 6d2d09da075bf65e27c2dffba5a594929816d0ab Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 13:46:55 -0800 Subject: [PATCH 146/175] including latest changes --- kubernetes/windows/setup.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index b5e194c33..a3662a664 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -41,7 +41,7 @@ if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabl Write-Host ('Installing Telegraf'); try { - $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-win/telegraf-win-debug.zip' + $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf-win.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win Move-Item -Path /installation/telegraf-win -Destination /opt/telegraf/ -ErrorAction SilentlyContinue From dd10118e0f05a2c4e61da4851cc2ad1767a7b8c7 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 17:46:48 -0800 Subject: [PATCH 147/175] fixing telemetry bug --- build/linux/installer/conf/telegraf-rs.conf | 6 +-- source/plugins/go/src/telemetry.go | 44 +++++++++++++-------- 2 files changed, 31 insertions(+), 19 deletions(-) diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index ee1cf8819..f534bb992 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -73,11 +73,11 @@ ## Logging configuration: ## Run telegraf with debug log messages. 
- debug = false + debug = true ## Run telegraf in quiet mode (error log messages only). - quiet = true + quiet = false ## Specify the log file name. The empty string means to log to stderr. - logfile = "" + logfile = "/opt/telegraf-logs-debug.txt" ## Override default hostname, if empty use os.Hostname() #hostname = "placeholder_hostname" diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index e35af6dc9..52aafc7b8 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -303,31 +303,43 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { TelemetryClient.Context().CommonProperties = CommonProperties // Getting the namespace count, monitor kubernetes pods values and namespace count once at start because it wont change unless the configmap is applied and the container is restarted + + OSMNamespaceCount = 0 osmNsCount := os.Getenv("TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT") - OSMNamespaceCount, err = strconv.Atoi(osmNsCount) - if err != nil { - Log("OSM namespace count string to int conversion error %s", err.Error()) - OSMNamespaceCount = 0 + if osmNsCount != nil && osmNsCount != "" { + OSMNamespaceCount, err = strconv.Atoi(osmNsCount) + if err != nil { + Log("OSM namespace count string to int conversion error %s", err.Error()) + } } + PromMonitorPods = os.Getenv("TELEMETRY_CUSTOM_PROM_MONITOR_PODS") + + PromMonitorPodsNamespaceLength = 0 promMonPodsNamespaceLength := os.Getenv("TELEMETRY_CUSTOM_PROM_MONITOR_PODS_NS_LENGTH") - PromMonitorPodsNamespaceLength, err = strconv.Atoi(promMonPodsNamespaceLength) - if err != nil { - Log("Custom prometheus monitor kubernetes pods namespace count string to int conversion error %s", err.Error()) - PromMonitorPodsNamespaceLength = 0 + if promMonPodsNamespaceLength != nil && promMonPodsNamespaceLength != "" { + PromMonitorPodsNamespaceLength, err = strconv.Atoi(promMonPodsNamespaceLength) + if err != nil { + Log("Custom prometheus monitor 
kubernetes pods namespace count string to int conversion error %s", err.Error()) + } } + + PromMonitorPodsLabelSelectorLength = 0 promLabelSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_LABEL_SELECTOR_LENGTH") - PromMonitorPodsLabelSelectorLength, err = strconv.Atoi(promLabelSelectorLength) - if err != nil { - Log("Custom prometheus label selector count string to int conversion error %s", err.Error()) - PromMonitorPodsLabelSelectorLength = 0 + if promLabelSelectorLength != nil && promLabelSelectorLength != "" { + PromMonitorPodsLabelSelectorLength, err = strconv.Atoi(promLabelSelectorLength) + if err != nil { + Log("Custom prometheus label selector count string to int conversion error %s", err.Error()) + } } + PromMonitorPodsFieldSelectorLength = 0 promFieldSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_FIELD_SELECTOR_LENGTH") - PromMonitorPodsFieldSelectorLength, err = strconv.Atoi(promFieldSelectorLength) - if err != nil { - Log("Custom prometheus field selector count string to int conversion error %s", err.Error()) - PromMonitorPodsFieldSelectorLength = 0 + if promFieldSelectorLength != nil && promFieldSelectorLength != "" { + PromMonitorPodsFieldSelectorLength, err = strconv.Atoi(promFieldSelectorLength) + if err != nil { + Log("Custom prometheus field selector count string to int conversion error %s", err.Error()) + } } return 0, nil From 998cb089f81186e537df868bf43103348976d122 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 17:48:19 -0800 Subject: [PATCH 148/175] fix --- source/plugins/go/src/telemetry.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 52aafc7b8..25c77c6f7 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -306,7 +306,7 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { OSMNamespaceCount = 0 osmNsCount := 
os.Getenv("TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT") - if osmNsCount != nil && osmNsCount != "" { + if osmNsCount != "" { OSMNamespaceCount, err = strconv.Atoi(osmNsCount) if err != nil { Log("OSM namespace count string to int conversion error %s", err.Error()) @@ -317,7 +317,7 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { PromMonitorPodsNamespaceLength = 0 promMonPodsNamespaceLength := os.Getenv("TELEMETRY_CUSTOM_PROM_MONITOR_PODS_NS_LENGTH") - if promMonPodsNamespaceLength != nil && promMonPodsNamespaceLength != "" { + if promMonPodsNamespaceLength != "" { PromMonitorPodsNamespaceLength, err = strconv.Atoi(promMonPodsNamespaceLength) if err != nil { Log("Custom prometheus monitor kubernetes pods namespace count string to int conversion error %s", err.Error()) @@ -326,7 +326,7 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { PromMonitorPodsLabelSelectorLength = 0 promLabelSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_LABEL_SELECTOR_LENGTH") - if promLabelSelectorLength != nil && promLabelSelectorLength != "" { + if promLabelSelectorLength != "" { PromMonitorPodsLabelSelectorLength, err = strconv.Atoi(promLabelSelectorLength) if err != nil { Log("Custom prometheus label selector count string to int conversion error %s", err.Error()) @@ -335,7 +335,7 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { PromMonitorPodsFieldSelectorLength = 0 promFieldSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_FIELD_SELECTOR_LENGTH") - if promFieldSelectorLength != nil && promFieldSelectorLength != "" { + if promFieldSelectorLength != "" { PromMonitorPodsFieldSelectorLength, err = strconv.Atoi(promFieldSelectorLength) if err != nil { Log("Custom prometheus field selector count string to int conversion error %s", err.Error()) From 1fe58429020261fc7c3933789797bff803cc1d0f Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 18:57:35 -0800 Subject: [PATCH 149/175] fixing osm 
parsing --- .../scripts/tomlparser-osm-config.rb | 41 +++++++++---------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index a195bf8ae..b3fb1f97a 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -137,31 +137,30 @@ def replaceOsmTelegrafConfigPlaceHolders configMapSettings = parseConfigMap if !configMapSettings.nil? populateSettingValuesFromConfigMap(configMapSettings) + # Check to see if the prometheus custom config parser has created a test config file so that we can replace the settings in the test file and run it, If not create + # a test config file by copying contents of the actual telegraf config file. + if (!File.exist?(@tgfTestConfigFile)) + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + puts "test telegraf config file #{@tgfTestConfigFile} does not exist, creating new one" + FileUtils.cp(@tgfConfigFile, @tgfTestConfigFile) + end + + replaceOsmTelegrafConfigPlaceHolders() + + # Write the telemetry to file, so that they can be set as environment variables + telemetryFile = File.open("integration_osm_config_env_var", "w") + + if !telemetryFile.nil? 
+ telemetryFile.write("export TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT=#{@osmMetricNamespaces.length}\n") + # Close file after writing all environment variables + telemetryFile.close + else + puts "config::osm::Exception while opening file for writing OSM telemetry environment variables" + end end else if (File.file?(@configMapMountPath)) ConfigParseErrorLogger.logError("config::osm::unsupported/missing config schema version - '#{@osmConfigSchemaVersion}' , using defaults, please use supported schema version") end end - -# Check to see if the prometheus custom config parser has created a test config file so that we can replace the settings in the test file and run it, If not create -# a test config file by copying contents of the actual telegraf config file. -if (!File.exist?(@tgfTestConfigFile)) - # Copy the telegraf config file to a temp file to run telegraf in test mode with this config - puts "test telegraf config file #{@tgfTestConfigFile} does not exist, creating new one" - FileUtils.cp(@tgfConfigFile, @tgfTestConfigFile) -end - -replaceOsmTelegrafConfigPlaceHolders() - -# Write the telemetry to file, so that they can be set as environment variables -telemetryFile = File.open("integration_osm_config_env_var", "w") - -if !telemetryFile.nil? 
- telemetryFile.write("export TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT=#{@osmMetricNamespaces.length}\n") - # Close file after writing all environment variables - telemetryFile.close -else - puts "config::osm::Exception while opening file for writing OSM telemetry environment variables" -end puts "****************End OSM Config Processing********************" From a4deef7bc97bc5d92b9c11dc18ffbdac69131889 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 19:08:43 -0800 Subject: [PATCH 150/175] clean up main.sh --- kubernetes/linux/main.sh | 71 +++++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 34 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 7d9406ab3..9870a45d0 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -227,35 +227,37 @@ echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc source ~/.bashrc +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + #Parse the configmap to set the right environment variables. + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb -#Parse the configmap to set the right environment variables. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb - -cat config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source config_env_var - + cat config_env_var | while read line; do + #echo $line + echo $line >> ~/.bashrc + done + source config_env_var +fi #Parse the configmap to set the right environment variables for agent config. 
#Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb -cat agent_config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source agent_config_env_var + cat agent_config_env_var | while read line; do + #echo $line + echo $line >> ~/.bashrc + done + source agent_config_env_var -#Parse the configmap to set the right environment variables for network policy manager (npm) integration. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb + #Parse the configmap to set the right environment variables for network policy manager (npm) integration. + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb -cat integration_npm_config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source integration_npm_config_env_var + cat integration_npm_config_env_var | while read line; do + #echo $line + echo $line >> ~/.bashrc + done + source integration_npm_config_env_var +fi #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then @@ -295,21 +297,22 @@ fi #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. 
-/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb -cat config_mdm_metrics_env_var | while read line; do - echo $line >> ~/.bashrc -done -source config_mdm_metrics_env_var - -#Parse the configmap to set the right environment variables for metric collection settings -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb + cat config_mdm_metrics_env_var | while read line; do + echo $line >> ~/.bashrc + done + source config_mdm_metrics_env_var -cat config_metric_collection_env_var | while read line; do - echo $line >> ~/.bashrc -done -source config_metric_collection_env_var + #Parse the configmap to set the right environment variables for metric collection settings + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb + cat config_metric_collection_env_var | while read line; do + echo $line >> ~/.bashrc + done + source config_metric_collection_env_var +fi # OSM scraping to be done in replicaset if sidecar car scraping is disabled and always do the scraping from the sidecar (It will always be either one of the two) if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || From fcb3bc24095047cde6efeebeb60661250db6ac1f Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 19:24:00 -0800 Subject: [PATCH 151/175] bug fix --- kubernetes/linux/main.sh | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 9870a45d0..c11d10fed 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -319,10 +319,12 @@ if [[ ( ( ! 
-e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "Prometheus ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb - cat integration_osm_config_env_var | while read line; do - echo $line >> ~/.bashrc - done - source integration_osm_config_env_var + if [ -e "integration_osm_config_env_var" ]; then + cat integration_osm_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source integration_osm_config_env_var + fi fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request From de826ae2ddb6bcbbf6567798ef0b259726cf9c66 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Fri, 5 Mar 2021 19:31:18 -0800 Subject: [PATCH 152/175] bug fix --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 ++ 1 file changed, 2 insertions(+) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index b3fb1f97a..f9b399816 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -161,6 +161,8 @@ def replaceOsmTelegrafConfigPlaceHolders else if (File.file?(@configMapMountPath)) ConfigParseErrorLogger.logError("config::osm::unsupported/missing config schema version - '#{@osmConfigSchemaVersion}' , using defaults, please use supported schema version") + else + puts "config::No configmap mounted for OSM config, using defaults" end end puts "****************End OSM Config Processing********************" From 5ece56c88285681cf4789254981add6c439621b6 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 8 Mar 2021 10:47:16 -0800 Subject: [PATCH 153/175] bug fixes --- kubernetes/windows/setup.ps1 | 32 +++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 
index a3662a664..190899605 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -34,26 +34,20 @@ Write-Host ('Installing Fluent Bit'); } Write-Host ('Finished Installing Fluentbit') -# Start telegraf only in sidecar scraping mode -$sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') -if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') -{ - Write-Host ('Installing Telegraf'); - - try { - $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf-win.zip' - Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip - Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win - Move-Item -Path /installation/telegraf-win -Destination /opt/telegraf/ -ErrorAction SilentlyContinue - } - catch { - $ex = $_.Exception - Write-Host "exception while downloading telegraf for windows" - Write-Host $ex - exit 1 - } - Write-Host ('Finished downloading Telegraf') +Write-Host ('Installing Telegraf'); +try { + $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf-win.zip' + Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip + Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win + Move-Item -Path /installation/telegraf-win -Destination /opt/telegraf/ -ErrorAction SilentlyContinue +} +catch { + $ex = $_.Exception + Write-Host "exception while downloading telegraf for windows" + Write-Host $ex + exit 1 } +Write-Host ('Finished downloading Telegraf') Write-Host ('Installing Visual C++ Redistributable Package') $vcRedistLocation = 'https://aka.ms/vs/16/release/vc_redist.x64.exe' From c8f2745ba0c73343695fa169022e00f6da7f8154 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 8 Mar 2021 14:08:14 -0800 Subject: [PATCH 154/175] changes --- 
build/linux/installer/conf/telegraf-prom-side-car.conf | 6 ------ kubernetes/windows/main.ps1 | 6 +++--- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index dba2418ca..061b0cb94 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -729,12 +729,6 @@ [[inputs.prometheus]] interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" - ## An array of urls to scrape metrics from. - # urls = $AZMON_RS_PROM_URLS - - # ## An array of Kubernetes services to scrape metrics from. - # kubernetes_services = $AZMON_RS_PROM_K8S_SERVICES - ## Scrape Kubernetes pods for the following prometheus annotations: ## - prometheus.io/scrape: Enable scraping for this pod ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index cfc793262..bbcc92c4c 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -335,11 +335,11 @@ function Start-Telegraf { } Write-Host "Installing telegraf service" - C:\opt\telegraf\telegraf-win\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" + C:\opt\telegraf\telegraf-win\telegraf-win.exe --service install --config "C:\etc\telegraf\telegraf.conf" Write-Host "Running telegraf service in test mode" - C:\opt\telegraf\telegraf-win\telegraf.exe --config "C:\etc\telegraf\telegraf.conf" --test + C:\opt\telegraf\telegraf-win\telegraf-win.exe --config "C:\etc\telegraf\telegraf.conf" --test Write-Host "Starting telegraf service" - C:\opt\telegraf\telegraf-win\telegraf.exe --service start + C:\opt\telegraf\telegraf-win\telegraf-win.exe --service start } function Generate-Certificates { From 443e7844feadba759f523bb9d09edd7d0c858330 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 8 Mar 2021 19:22:47 -0800 Subject: 
[PATCH 155/175] telemetry bug fix --- source/plugins/ruby/in_kube_nodes.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index be870ab5e..0195c18c3 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -292,7 +292,7 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength properties["rsPromMonPodsLabelSelectorLength"] = @@rsPromMonitorPodsLabelSelectorLength properties["rsPromMonPodsFieldSelectorLength"] = @@rsPromMonitorPodsFieldSelectorLength - properties["osmNamespaceCount"] == @@osmNamespaceCount + properties["osmNamespaceCount"] = @@osmNamespaceCount end ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties) telemetrySent = true From abdb59a24962c79b1021ea930eb0cacb912a04be Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 9 Mar 2021 18:50:39 -0800 Subject: [PATCH 156/175] adding exception handler --- kubernetes/windows/main.ps1 | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index bbcc92c4c..e77e92161 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -405,6 +405,20 @@ Start-FileSystemWatcher Generate-Certificates Test-CertificatePath +# # Start telegraf only in sidecar scraping mode +# $sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') +# if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') +# { +# Start-Telegraf +# } +try { + Start-Fluent +} catch { + $e = $_.Exception + Write-Host $e + Write-Host "exception occured while starting fluent..." 
+ } + # Start telegraf only in sidecar scraping mode $sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') @@ -412,7 +426,6 @@ if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabl Start-Telegraf } -Start-Fluent # List all powershell processes running. This should have main.ps1 and filesystemwatcher.ps1 Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | Format-Table -Property Name, CommandLine, ProcessId From 9247709babdd9897b82a446d6c31ba1cfde48192 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 10 Mar 2021 11:51:08 -0800 Subject: [PATCH 157/175] fixes for socket issue --- kubernetes/windows/main.ps1 | 40 ++++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index e77e92161..589af1727 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -273,7 +273,7 @@ function Get-ContainerRuntime { return $containerRuntime } -function Start-Fluent { +function Start-Fluent-Telegraf { # Run fluent-bit service first so that we do not miss any logs being forwarded by the fluentd service. # Run fluent-bit as a background job. Switch this to a windows service once fluent-bit supports natively running as a windows service @@ -289,6 +289,14 @@ function Start-Fluent { (Get-Content -Path C:/etc/fluent/fluent.conf -Raw) -replace 'fluent-docker-parser.conf','fluent-cri-parser.conf' | Set-Content C:/etc/fluent/fluent.conf } + # Start telegraf only in sidecar scraping mode + $sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') + if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') + { + Write-Host "Starting telegraf..." 
+ Start-Telegraf + } + fluentd --reg-winsvc i --reg-winsvc-auto-start --winsvc-name fluentdwinaks --reg-winsvc-fluentdopt '-c C:/etc/fluent/fluent.conf -o C:/etc/fluent/fluent.log' Notepad.exe | Out-Null @@ -340,6 +348,16 @@ function Start-Telegraf { C:\opt\telegraf\telegraf-win\telegraf-win.exe --config "C:\etc\telegraf\telegraf.conf" --test Write-Host "Starting telegraf service" C:\opt\telegraf\telegraf-win\telegraf-win.exe --service start + + # Trying to start telegraf again if it did not start due to fluent bit not being ready at startup + Get-Service telegraf | findstr Running + if ($? -eq false) + { + Write-Host "trying to start telegraf in again in 30 seconds, since fluentbit might not have been ready..." + Start-Sleep -s 30 + C:\opt\telegraf\telegraf-win\telegraf-win.exe --service start + Get-Service telegraf + } } function Generate-Certificates { @@ -411,20 +429,14 @@ Test-CertificatePath # { # Start-Telegraf # } -try { - Start-Fluent -} catch { - $e = $_.Exception - Write-Host $e - Write-Host "exception occured while starting fluent..." - } +Start-Fluent-Telegraf -# Start telegraf only in sidecar scraping mode -$sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') -if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') -{ - Start-Telegraf -} +# # Start telegraf only in sidecar scraping mode +# $sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') +# if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') +# { +# Start-Telegraf +# } # List all powershell processes running. 
This should have main.ps1 and filesystemwatcher.ps1 From bbbdaf1883b1ed941b6612de024a889798c532f1 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 10 Mar 2021 12:55:19 -0800 Subject: [PATCH 158/175] bug fix --- kubernetes/windows/main.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 589af1727..43453f7ed 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -351,7 +351,7 @@ function Start-Telegraf { # Trying to start telegraf again if it did not start due to fluent bit not being ready at startup Get-Service telegraf | findstr Running - if ($? -eq false) + if ($? -eq $false) { Write-Host "trying to start telegraf in again in 30 seconds, since fluentbit might not have been ready..." Start-Sleep -s 30 From bb49bb0faf0401402ef4094d32d5b6076e284853 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 10 Mar 2021 13:09:55 -0800 Subject: [PATCH 159/175] adding metrics --- build/linux/installer/scripts/tomlparser-osm-config.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb index f9b399816..096064db8 100644 --- a/build/linux/installer/scripts/tomlparser-osm-config.rb +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -30,7 +30,7 @@ @metricVersion = 2 @monitorKubernetesPodsVersion = 2 #@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" -@fieldPassSetting = "[\"envoy_cluster_upstream_cx_total\", \"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time_bucket\", \"envoy_cluster_upstream_cx_rx_bytes_total\", \"envoy_cluster_upstream_cx_tx_bytes_total\"]" +@fieldPassSetting = "[\"envoy_cluster_upstream_cx_total\", 
\"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time_bucket\", \"envoy_cluster_upstream_cx_rx_bytes_total\", \"envoy_cluster_upstream_cx_tx_bytes_total\", \"envoy_cluster_upstream_cx_active\"]" @scrapeInterval = "1m" @urlTag = "scrapeUrl" @bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" From a72f0a55e7c0aec16569fe8b87ca42e068e18fc4 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 15 Mar 2021 18:40:09 -0700 Subject: [PATCH 160/175] getting telegraf build from pre release --- kubernetes/linux/setup.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 056a147a6..bcbaffc5d 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -61,7 +61,13 @@ sudo apt-get install libcap2-bin -y #service telegraf stop #wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf -wget https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf +#wget https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf + +#1.18 pre-release +wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0~rc1_linux_amd64.tar.gz +tar -zxvf telegraf-1.18.0~rc1_linux_amd64.tar.gz + +mv /opt/telegraf-1.18.0/usr/bin/telegraf /opt/telegraf chmod 777 /opt/telegraf From 03b4b16439b6223ea8c7b565f790c5e615eaaebf Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 16 Mar 2021 10:36:09 -0700 Subject: [PATCH 161/175] windows changes --- kubernetes/windows/main.ps1 | 9 +++++---- kubernetes/windows/setup.ps1 | 3 ++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 43453f7ed..aeb3de8ac 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -343,11 
+343,12 @@ function Start-Telegraf { } Write-Host "Installing telegraf service" - C:\opt\telegraf\telegraf-win\telegraf-win.exe --service install --config "C:\etc\telegraf\telegraf.conf" + # C:\opt\telegraf\telegraf-win\telegraf-win.exe --service install --config "C:\etc\telegraf\telegraf.conf" + C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" Write-Host "Running telegraf service in test mode" - C:\opt\telegraf\telegraf-win\telegraf-win.exe --config "C:\etc\telegraf\telegraf.conf" --test + C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --config "C:\etc\telegraf\telegraf.conf" --test Write-Host "Starting telegraf service" - C:\opt\telegraf\telegraf-win\telegraf-win.exe --service start + C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --service start # Trying to start telegraf again if it did not start due to fluent bit not being ready at startup Get-Service telegraf | findstr Running @@ -355,7 +356,7 @@ function Start-Telegraf { { Write-Host "trying to start telegraf in again in 30 seconds, since fluentbit might not have been ready..." 
Start-Sleep -s 30 - C:\opt\telegraf\telegraf-win\telegraf-win.exe --service start + C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --service start Get-Service telegraf } } diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 190899605..e820498f3 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -36,7 +36,8 @@ Write-Host ('Finished Installing Fluentbit') Write-Host ('Installing Telegraf'); try { - $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf-win.zip' + # $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf-win.zip' + $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0~rc1_windows_amd64.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win Move-Item -Path /installation/telegraf-win -Destination /opt/telegraf/ -ErrorAction SilentlyContinue From 08fa562f989f8fe08eb6ae8226e774d5dd5a3249 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 16 Mar 2021 11:42:44 -0700 Subject: [PATCH 162/175] bug fix --- kubernetes/windows/setup.ps1 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index e820498f3..aa3afb48e 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -38,9 +38,9 @@ Write-Host ('Installing Telegraf'); try { # $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf-win.zip' $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0~rc1_windows_amd64.zip' - Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf-win.zip - Expand-Archive -Path /installation/telegraf-win.zip -Destination /installation/telegraf-win - Move-Item -Path /installation/telegraf-win -Destination 
/opt/telegraf/ -ErrorAction SilentlyContinue + Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip + Expand-Archive -Path /installation/telegraf.zip -Destination /installation/telegraf + Move-Item -Path /installation/telegraf -Destination /opt/telegraf/ -ErrorAction SilentlyContinue } catch { $ex = $_.Exception From fc16a38b932e0f9264949b44080153c251449493 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 16 Mar 2021 12:50:41 -0700 Subject: [PATCH 163/175] some changes --- kubernetes/omsagent.yaml | 4 ++++ kubernetes/windows/main.ps1 | 19 +++++++++++++++++++ kubernetes/windows/setup.ps1 | 2 +- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 25c5620a4..e7af896b9 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -802,6 +802,10 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: PODNAME + valueFrom: + fieldRef: + fieldPath: metadata.name - name: NODE_IP valueFrom: fieldRef: diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index aeb3de8ac..4e2373f4d 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -343,6 +343,25 @@ function Start-Telegraf { } Write-Host "Installing telegraf service" + # Setting delay auto start for telegraf since there have been known issues with windows server and telegraf - + # https://github.com/influxdata/telegraf/issues/4081 + # https://github.com/influxdata/telegraf/issues/3601 + try { + $serverName = [System.Environment]::GetEnvironmentVariable("PODNAME", "process") + if (![string]::IsNullOrEmpty($serverName)) { + sc.exe \\$serverName config telegraf start= delayed-auto + Write-Host "Successfully set delayed start for telegraf" + + } else { + Write-Host "Failed to get environment variable PODNAME to set delayed telegraf start" + } + } + catch { + $e = $_.Exception + Write-Host $e + Write-Host "exception occured in delayed telegraf start.. 
continuing without exiting" + } + # C:\opt\telegraf\telegraf-win\telegraf-win.exe --service install --config "C:\etc\telegraf\telegraf.conf" C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" Write-Host "Running telegraf service in test mode" diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index aa3afb48e..056a7c705 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -40,7 +40,7 @@ try { $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0~rc1_windows_amd64.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip Expand-Archive -Path /installation/telegraf.zip -Destination /installation/telegraf - Move-Item -Path /installation/telegraf -Destination /opt/telegraf/ -ErrorAction SilentlyContinue + Move-Item -Path /installation/telegraf -Destination /opt/telegraf -ErrorAction SilentlyContinue } catch { $ex = $_.Exception From b7a9cd36e2cfbb0899e8e6f9d84ec258650cb1ad Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 16 Mar 2021 14:01:43 -0700 Subject: [PATCH 164/175] bug fixes --- kubernetes/windows/main.ps1 | 5 ++--- kubernetes/windows/setup.ps1 | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 4e2373f4d..45f18932e 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -343,6 +343,8 @@ function Start-Telegraf { } Write-Host "Installing telegraf service" + C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" + # Setting delay auto start for telegraf since there have been known issues with windows server and telegraf - # https://github.com/influxdata/telegraf/issues/4081 # https://github.com/influxdata/telegraf/issues/3601 @@ -361,9 +363,6 @@ function Start-Telegraf { Write-Host $e Write-Host "exception occured in delayed telegraf start.. 
continuing without exiting" } - - # C:\opt\telegraf\telegraf-win\telegraf-win.exe --service install --config "C:\etc\telegraf\telegraf.conf" - C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" Write-Host "Running telegraf service in test mode" C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --config "C:\etc\telegraf\telegraf.conf" --test Write-Host "Starting telegraf service" diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 056a7c705..9a67c9f44 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -40,7 +40,7 @@ try { $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0~rc1_windows_amd64.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip Expand-Archive -Path /installation/telegraf.zip -Destination /installation/telegraf - Move-Item -Path /installation/telegraf -Destination /opt/telegraf -ErrorAction SilentlyContinue + Move-Item -Path /installation/telegraf -Destination /opt/ -ErrorAction SilentlyContinue } catch { $ex = $_.Exception From 362f659ea5f965cfb2e4ba3515155e5e5a240ad9 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 16 Mar 2021 17:20:56 -0700 Subject: [PATCH 165/175] fixing bug --- kubernetes/windows/main.ps1 | 10 +++++----- kubernetes/windows/setup.ps1 | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 45f18932e..d60214347 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -343,8 +343,8 @@ function Start-Telegraf { } Write-Host "Installing telegraf service" - C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" - + C:\opt\telegraf\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" + # Setting delay auto start for telegraf since there have been known issues with windows server and telegraf - # 
https://github.com/influxdata/telegraf/issues/4081 # https://github.com/influxdata/telegraf/issues/3601 @@ -364,9 +364,9 @@ function Start-Telegraf { Write-Host "exception occured in delayed telegraf start.. continuing without exiting" } Write-Host "Running telegraf service in test mode" - C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --config "C:\etc\telegraf\telegraf.conf" --test + C:\opt\telegraf\telegraf.exe --config "C:\etc\telegraf\telegraf.conf" --test Write-Host "Starting telegraf service" - C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --service start + C:\opt\telegraf\telegraf.exe --service start # Trying to start telegraf again if it did not start due to fluent bit not being ready at startup Get-Service telegraf | findstr Running @@ -374,7 +374,7 @@ function Start-Telegraf { { Write-Host "trying to start telegraf in again in 30 seconds, since fluentbit might not have been ready..." Start-Sleep -s 30 - C:\opt\telegraf\telegraf-1.18.0\telegraf.exe --service start + C:\opt\telegraf\telegraf.exe --service start Get-Service telegraf } } diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index 9a67c9f44..e2a2e82e0 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -40,7 +40,7 @@ try { $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0~rc1_windows_amd64.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip Expand-Archive -Path /installation/telegraf.zip -Destination /installation/telegraf - Move-Item -Path /installation/telegraf -Destination /opt/ -ErrorAction SilentlyContinue + Move-Item -Path /installation/telegraf/*/* -Destination /opt/telegraf/ -ErrorAction SilentlyContinue } catch { $ex = $_.Exception From 60c2d03698c816e83329660010675b65c0a86a14 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 16 Mar 2021 18:44:05 -0700 Subject: [PATCH 166/175] removing some comments and unused code --- .../scripts/tomlparser-prom-customconfig.rb | 2 - 
.../conf/telegraf-prom-side-car.conf | 111 ------------------ build/windows/installer/conf/telegraf.conf | 76 ------------ kubernetes/linux/main.sh | 2 - kubernetes/linux/setup.sh | 1 - kubernetes/omsagent.yaml | 13 -- kubernetes/windows/main.ps1 | 44 ------- 7 files changed, 249 deletions(-) diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb index 2e112f36c..819c1956f 100644 --- a/build/common/installer/scripts/tomlparser-prom-customconfig.rb +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -343,8 +343,6 @@ def populateSettingValuesFromConfigMap(parsedConfig) end # end of type check condition rescue => errorStr ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for promethues side car: #{errorStr}, using defaults") - # look into this - #setRsPromDefaults puts "****************End Prometheus Config Processing********************" end elsif @controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? 
diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 061b0cb94..a33ee21fd 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -123,41 +123,6 @@ namedrop = ["agent_telemetry"] #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] -# # Output to send MDM metrics to fluent bit and then route it to fluentD -# [[outputs.socket_writer]] -# ## URL to connect to -# address = "tcp://0.0.0.0:25228" -# # address = "tcp://example.com:http" -# # address = "tcp4://127.0.0.1:8094" -# # address = "tcp6://127.0.0.1:8094" -# # address = "tcp6://[2001:db8::1]:8094" -# # address = "udp://127.0.0.1:8094" -# # address = "udp4://127.0.0.1:8094" -# # address = "udp6://127.0.0.1:8094" -# # address = "unix:///tmp/telegraf.sock" -# # address = "unixgram:///tmp/telegraf.sock" - -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - -# ## Period between keep alive probes. -# ## Only applies to TCP sockets. -# ## 0 disables keep alive probes. -# ## Defaults to the OS configuration. -# # keep_alive_period = "5m" - -# ## Data format to generate. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "json" -# namepass = ["container.azm.ms/disk"] -# #fieldpass = ["used_percent"] - [[outputs.application_insights]] ## Instrumentation key of the Application Insights resource. 
instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY" @@ -691,40 +656,6 @@ # insecure_skip_verify = true -## prometheus custom metrics -# [[inputs.prometheus]] - -# interval = "$AZMON_DS_PROM_INTERVAL" - -# ## An array of urls to scrape metrics from. -# urls = $AZMON_DS_PROM_URLS - -# fieldpass = $AZMON_DS_PROM_FIELDPASS - -# fielddrop = $AZMON_DS_PROM_FIELDDROP - -# metric_version = 2 -# url_tag = "scrapeUrl" - -# ## Kubernetes config file to create client from. -# # kube_config = "/path/to/kubernetes.config" - -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" -# ## OR -# # bearer_token_string = "abc_123" - -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# response_timeout = "15s" - -# ## Optional TLS Config -# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -# #tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# insecure_skip_verify = true - #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] - #Prometheus Custom Metrics [[inputs.prometheus]] interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" @@ -770,48 +701,6 @@ $AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER ## OSM Prometheus configuration $AZMON_TELEGRAF_OSM_PROM_PLUGINS - -# ##npm -# [[inputs.prometheus]] -# #name_prefix="container.azm.ms/" -# ## An array of urls to scrape metrics from. -# urls = [] - -# #metric_version = 2 -# url_tag = "scrapeUrl" - -# ## An array of Kubernetes services to scrape metrics from. -# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - -# ## Kubernetes config file to create client from. 
-# # kube_config = "/path/to/kubernetes.config" - -# ## Scrape Kubernetes pods for the following prometheus annotations: -# ## - prometheus.io/scrape: Enable scraping for this pod -# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to -# ## set this to `https` & most likely set the tls config. -# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. -# ## - prometheus.io/port: If port is not 9102 use this annotation -# monitor_kubernetes_pods = true - -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" -# ## OR -# # bearer_token_string = "abc_123" - -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# response_timeout = "15s" - -# ## Optional TLS Config -# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -# #tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# insecure_skip_verify = true -# #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -# #[inputs.prometheus.tagpass] -# # operation_type = ["create_container", "remove_container", "pull_image"] - # [[inputs.exec]] # ## Commands array # interval = "15m" diff --git a/build/windows/installer/conf/telegraf.conf b/build/windows/installer/conf/telegraf.conf index 233600251..52e16f98c 100644 --- a/build/windows/installer/conf/telegraf.conf +++ b/build/windows/installer/conf/telegraf.conf @@ -690,41 +690,6 @@ # tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" # insecure_skip_verify = true - -## prometheus custom metrics -# [[inputs.prometheus]] - -# interval = "$AZMON_DS_PROM_INTERVAL" - -# ## An array of urls to scrape metrics from. 
-# urls = $AZMON_DS_PROM_URLS - -# fieldpass = $AZMON_DS_PROM_FIELDPASS - -# fielddrop = $AZMON_DS_PROM_FIELDDROP - -# metric_version = 2 -# url_tag = "scrapeUrl" - -# ## Kubernetes config file to create client from. -# # kube_config = "/path/to/kubernetes.config" - -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" -# ## OR -# # bearer_token_string = "abc_123" - -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# response_timeout = "15s" - -# ## Optional TLS Config -# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -# #tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# insecure_skip_verify = true - #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] - #Prometheus Custom Metrics [[inputs.prometheus]] interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" @@ -766,47 +731,6 @@ $AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER -# ##npm -# [[inputs.prometheus]] -# #name_prefix="container.azm.ms/" -# ## An array of urls to scrape metrics from. -# urls = [] - -# #metric_version = 2 -# url_tag = "scrapeUrl" - -# ## An array of Kubernetes services to scrape metrics from. -# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - -# ## Kubernetes config file to create client from. -# # kube_config = "/path/to/kubernetes.config" - -# ## Scrape Kubernetes pods for the following prometheus annotations: -# ## - prometheus.io/scrape: Enable scraping for this pod -# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to -# ## set this to `https` & most likely set the tls config. -# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
-# ## - prometheus.io/port: If port is not 9102 use this annotation -# monitor_kubernetes_pods = true - -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" -# ## OR -# # bearer_token_string = "abc_123" - -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# response_timeout = "15s" - -# ## Optional TLS Config -# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -# #tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# insecure_skip_verify = true -# #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -# #[inputs.prometheus.tagpass] -# # operation_type = ["create_container", "remove_container", "pull_image"] - # [[inputs.exec]] # ## Commands array # interval = "15m" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index c11d10fed..4fedbc776 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -232,7 +232,6 @@ if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then /opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb cat config_env_var | while read line; do - #echo $line echo $line >> ~/.bashrc done source config_env_var @@ -357,7 +356,6 @@ else echo "Making curl request to cadvisor endpoint with port 10255 to get the configured container runtime on kubelet" podWithValidContainerId=$(curl -s http://$NODE_IP:10255/pods | jq -R 'fromjson? | [ .items[] | select( any(.status.phase; contains("Running")) ) ] | .[0]') fi -#podWithValidContainerId=$(curl -s -k -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://$NODE_IP:10250/pods | jq -R 'fromjson? | [ .items[] | .metadata.namespace ] ' ) if [ ! 
-z "$podWithValidContainerId" ]; then containerRuntime=$(echo $podWithValidContainerId | jq -r '.status.containerStatuses[0].containerID' | cut -d ':' -f 1) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index bcbaffc5d..1216b60de 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -61,7 +61,6 @@ sudo apt-get install libcap2-bin -y #service telegraf stop #wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf -#wget https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf #1.18 pre-release wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0~rc1_linux_amd64.tar.gz diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 45159e56e..4ce7e7948 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -656,23 +656,12 @@ spec: protocol: TCP name: in-rs-tcp volumeMounts: - - mountPath: /var/run/host - name: docker-sock - - mountPath: /var/log - name: host-log - - mountPath: /etc/kubernetes/host - name: azure-json-path - mountPath: /etc/omsagent-secret name: omsagent-secret readOnly: true - - mountPath: /etc/config - name: omsagent-rs-config - mountPath: /etc/config/settings name: settings-vol-config readOnly: true - - mountPath: /etc/config/settings/adx - name: omsagent-adx-secret - readOnly: true # only in sidecar scraping mode - mountPath: /etc/config/osm-settings name: osm-settings-vol-config @@ -802,8 +791,6 @@ spec: # value: "my_acs_cluster_name" - name: CONTROLLER_TYPE value: "DaemonSet" - # - name: CONTAINER_OS - # value: "Windows" - name: HOSTNAME valueFrom: fieldRef: diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index d60214347..2901cbfb0 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -384,28 +384,6 @@ function Generate-Certificates { C:\\opt\\omsagentwindows\\certgenerator\\certificategenerator.exe } -#Commenting this out since wireserver access is no longer 
available -# function Bootstrap-CACertificates { -# try { -# # This is required when the root CA certs are different for some clouds. -# $caCerts=Invoke-WebRequest 'http://168.63.129.16/machine?comp=acmspackage&type=cacertificates&ext=json' -UseBasicParsing | ConvertFrom-Json -# if (![string]::IsNullOrEmpty($caCerts)) { -# $certificates = $caCerts.Certificates -# for ($index = 0; $index -lt $certificates.Length ; $index++) { -# $name=$certificates[$index].Name -# $certificates[$index].CertBody > $name -# Write-Host "name: $($name)" -# Import-Certificate -FilePath .\$name -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose -# } -# } -# } -# catch { -# $e = $_.Exception -# Write-Host $e -# Write-Host "exception occured in Bootstrap-CACertificates..." -# } -# } - function Test-CertificatePath { $certLocation = $env:CI_CERT_LOCATION $keyLocation = $env:CI_KEY_LOCATION @@ -432,32 +410,10 @@ Remove-WindowsServiceIfItExists "fluentdwinaks" Set-EnvironmentVariables Start-FileSystemWatcher -#Bootstrapping CA certs for non public clouds and AKS clusters -Commenting this out since wireserver access is no longer available -# $aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID") -# if (![string]::IsNullOrEmpty($aksResourceId) -and $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) -# { -# Bootstrap-CACertificates -# } - - Generate-Certificates Test-CertificatePath -# # Start telegraf only in sidecar scraping mode -# $sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') -# if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') -# { -# Start-Telegraf -# } Start-Fluent-Telegraf -# # Start telegraf only in sidecar scraping mode -# $sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') -# if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 
'true') -# { -# Start-Telegraf -# } - - # List all powershell processes running. This should have main.ps1 and filesystemwatcher.ps1 Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | Format-Table -Property Name, CommandLine, ProcessId From fda92d6bde401d08f07006c0a74c25cb117f8ef8 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 22 Mar 2021 12:30:29 -0700 Subject: [PATCH 167/175] removing unused sections from sidecar container --- kubernetes/omsagent.yaml | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 4ce7e7948..429089022 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -474,32 +474,9 @@ spec: # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" - - name: AZMON_CONTAINERLOGS_ONEAGENT_REGIONS - value: "koreacentral,norwayeast" securityContext: privileged: true - ports: - - containerPort: 25225 - protocol: TCP - - containerPort: 25224 - protocol: UDP volumeMounts: - - mountPath: /hostfs - name: host-root - readOnly: true - - mountPath: /var/run/host - name: docker-sock - - mountPath: /var/log - name: host-log - - mountPath: /var/lib/docker/containers - name: containerlog-path - readOnly: true - - mountPath: /mnt/docker - name: containerlog-path-2 - readOnly: true - - mountPath: /mnt/containers - name: containerlog-path-3 - readOnly: true - mountPath: /etc/kubernetes/host name: azure-json-path - mountPath: /etc/omsagent-secret @@ -508,9 +485,6 @@ spec: - mountPath: /etc/config/settings name: settings-vol-config readOnly: true - - mountPath: /etc/config/settings/adx - name: omsagent-adx-secret - readOnly: true - mountPath: /etc/config/osm-settings name: osm-settings-vol-config readOnly: true @@ -662,7 +636,7 @@ spec: - mountPath: /etc/config/settings name: settings-vol-config readOnly: true -# only in sidecar scraping mode + # only in 
sidecar scraping mode - mountPath: /etc/config/osm-settings name: osm-settings-vol-config readOnly: true From 9c8d50e419208e717890400c1e9f5623b5414532 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Mon, 22 Mar 2021 12:51:06 -0700 Subject: [PATCH 168/175] accidentally removed mounts from rs --- kubernetes/omsagent.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 429089022..2dff3f32a 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -630,12 +630,22 @@ spec: protocol: TCP name: in-rs-tcp volumeMounts: + - mountPath: /var/run/host + name: docker-sock + - mountPath: /var/log + name: host-log + - mountPath: /etc/kubernetes/host + name: azure-json-path - mountPath: /etc/omsagent-secret name: omsagent-secret readOnly: true + - mountPath: /etc/config + name: omsagent-rs-config - mountPath: /etc/config/settings name: settings-vol-config readOnly: true + - mountPath: /etc/config/settings/adx + name: omsagent-adx-secret # only in sidecar scraping mode - mountPath: /etc/config/osm-settings name: osm-settings-vol-config From 41f50338550703453d6f225db5889add5c6d5ee2 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 23 Mar 2021 10:18:16 -0700 Subject: [PATCH 169/175] changes --- build/linux/installer/conf/telegraf-prom-side-car.conf | 4 ++-- kubernetes/omsagent.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index a33ee21fd..1a4b38cc7 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -40,13 +40,13 @@ ## Telegraf will send metrics to outputs in batches of at most ## metric_batch_size metrics. ## This controls the size of writes that Telegraf sends to output plugins. 
- metric_batch_size = 1000 + metric_batch_size = 3000 ## For failed writes, telegraf will cache metric_buffer_limit metrics for each ## output, and will flush this buffer on a successful write. Oldest metrics ## are dropped first when this buffer fills. ## This buffer only fills when writes fail to output plugin(s). - metric_buffer_limit = 10000 + metric_buffer_limit = 60000 ## Collection jitter is used to jitter the collection by a random amount. ## Each plugin will sleep for a random time within jitter before collecting. diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 2dff3f32a..1a0b3de80 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -449,8 +449,8 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - cpu: 250m - memory: 600Mi + cpu: 500m + memory: 400Mi requests: cpu: 75m memory: 225Mi From 3014f224626807fdc657906f325d910e3fd5f3c2 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 23 Mar 2021 10:50:35 -0700 Subject: [PATCH 170/175] increasing fluentbit buffer sizes --- .../linux/installer/conf/td-agent-bit-prom-side-car.conf | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf index 4c7be6959..703426515 100644 --- a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -16,9 +16,11 @@ Tag oms.container.perf.telegraf.* Listen 0.0.0.0 Port 25229 - Chunk_Size 32 - Buffer_Size 64 - Mem_Buf_Limit 5m + # Chunk_Size 32 + # Buffer_Size 64 + Chunk_Size 1m + Buffer_Size 1m + Mem_Buf_Limit 20m [OUTPUT] Name oms From 44f8a7d6a86c04d276edadb340ef37a04e5692d7 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Tue, 23 Mar 2021 17:31:09 -0700 Subject: [PATCH 171/175] getting latest telegraf release --- kubernetes/linux/setup.sh | 4 +++- kubernetes/windows/setup.ps1 | 3 +-- 2 files 
changed, 4 insertions(+), 3 deletions(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 1216b60de..b61b259a4 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -64,7 +64,9 @@ sudo apt-get install libcap2-bin -y #1.18 pre-release wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0~rc1_linux_amd64.tar.gz -tar -zxvf telegraf-1.18.0~rc1_linux_amd64.tar.gz + +wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_linux_amd64.tar.gz +tar -zxvf telegraf-1.18.0_linux_amd64.tar.gz mv /opt/telegraf-1.18.0/usr/bin/telegraf /opt/telegraf diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index e2a2e82e0..25aad5e16 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -36,8 +36,7 @@ Write-Host ('Finished Installing Fluentbit') Write-Host ('Installing Telegraf'); try { - # $telegrafUri='https://github.com/microsoft/Docker-Provider/releases/download/telegraf-test-pr/telegraf-win.zip' - $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0~rc1_windows_amd64.zip' + $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_windows_amd64.zip' Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip Expand-Archive -Path /installation/telegraf.zip -Destination /installation/telegraf Move-Item -Path /installation/telegraf/*/* -Destination /opt/telegraf/ -ErrorAction SilentlyContinue From 09bb40e3e6b270bace3f3e97d917d49722d459b4 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 24 Mar 2021 18:50:58 -0700 Subject: [PATCH 172/175] some bug fixes --- kubernetes/linux/setup.sh | 2 -- source/plugins/go/src/telemetry.go | 8 +++++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index b61b259a4..218e3c717 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -63,8 +63,6 @@ sudo apt-get install libcap2-bin -y #wget 
https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf #1.18 pre-release -wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0~rc1_linux_amd64.tar.gz - wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_linux_amd64.tar.gz tar -zxvf telegraf-1.18.0_linux_amd64.tar.gz diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 25c77c6f7..48f82a9ab 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -137,7 +137,6 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheussidecar") == 0 { telemetryDimensions := make(map[string]string) - telemetryDimensions["ContainerType"] = "prometheussidecar" telemetryDimensions["CustomPromMonitorPods"] = promMonitorPods if promMonitorPodsNamespaceLength > 0 { telemetryDimensions["CustomPromMonitorPodsNamespaceLength"] = strconv.Itoa(promMonitorPodsNamespaceLength) @@ -300,6 +299,13 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { CommonProperties["IsProxyConfigured"] = "false" } + // Adding container type to telemetry + if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { + if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheussidecar") == 0 { + CommonProperties["ContainerType"] = "prometheussidecar" + } + } + TelemetryClient.Context().CommonProperties = CommonProperties // Getting the namespace count, monitor kubernetes pods values and namespace count once at start because it wont change unless the configmap is applied and the container is restarted From 7569b46e3a8d76eb8b83b84fe65636491b11bfa4 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Wed, 24 Mar 2021 19:14:33 -0700 Subject: [PATCH 173/175] fixing PR comments --- 
.../installer/conf/prometheus-side-car.conf | 8 - .../conf/td-agent-bit-prom-side-car.conf | 2 - .../conf/telegraf-prom-side-car.conf | 575 +----------------- build/linux/installer/conf/telegraf-rs.conf | 6 +- kubernetes/container-azm-ms-osmconfig.yaml | 2 +- kubernetes/linux/main.sh | 10 +- 6 files changed, 12 insertions(+), 591 deletions(-) diff --git a/build/linux/installer/conf/prometheus-side-car.conf b/build/linux/installer/conf/prometheus-side-car.conf index 31bc5f5ab..fd40910d9 100644 --- a/build/linux/installer/conf/prometheus-side-car.conf +++ b/build/linux/installer/conf/prometheus-side-car.conf @@ -1,11 +1,3 @@ -# Fluentd config file for OMS Docker - container components (non kubeAPI) - -# Forward port 25225 for container logs -# -# type forward -# port 25225 -# bind 127.0.0.1 -# diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf index 703426515..720f54820 100644 --- a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -16,8 +16,6 @@ Tag oms.container.perf.telegraf.* Listen 0.0.0.0 Port 25229 - # Chunk_Size 32 - # Buffer_Size 64 Chunk_Size 1m Buffer_Size 1m Mem_Buf_Limit 20m diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf index 1a4b38cc7..b3b4ba1d3 100644 --- a/build/linux/installer/conf/telegraf-prom-side-car.conf +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -16,19 +16,8 @@ # Global tags can be specified here in key="value" format. 
[global_tags] - #Below are entirely used for telemetry - #AgentVersion = "$AGENT_VERSION" - #AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" - #ACS_RESOURCE_NAME = "$TELEMETRY_ACS_RESOURCE_NAME" - #Region = "$TELEMETRY_AKS_REGION" - #ClusterName = "$TELEMETRY_CLUSTER_NAME" - #ClusterType = "$TELEMETRY_CLUSTER_TYPE" - #Computer = "placeholder_hostname" - #ControllerType = "$CONTROLLER_TYPE" - hostName = "placeholder_hostname" - # Configuration for telegraf agent [agent] ## Default data collection interval for all inputs @@ -73,11 +62,11 @@ ## Logging configuration: ## Run telegraf with debug log messages. - debug = true + debug = false ## Run telegraf in quiet mode (error log messages only). - quiet = false + quiet = true ## Specify the log file name. The empty string means to log to stderr. - logfile = "/opt/new-telegraf-logs.txt" + logfile = "" ## Override default hostname, if empty use os.Hostname() #hostname = "placeholder_hostname" ## If set to true, do no set the "host" tag in the telegraf agent. @@ -121,27 +110,6 @@ ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md data_format = "json" namedrop = ["agent_telemetry"] - #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] - -[[outputs.application_insights]] - ## Instrumentation key of the Application Insights resource. - instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY" - - ## Timeout for closing (default: 5s). - # timeout = "5s" - - ## Enable additional diagnostic logging. - # enable_diagnostic_logging = false - - ## Context Tag Sources add Application Insights context tags to a tag value. 
- ## - ## For list of allowed context tag keys see: - ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go - # [outputs.application_insights.context_tag_sources] - # "ai.cloud.role" = "kubernetes_container_name" - # "ai.cloud.roleInstance" = "kubernetes_pod_name" - namepass = ["agent_telemetry"] - #tagdrop = ["nodeName"] ############################################################################### # PROCESSOR PLUGINS # @@ -150,511 +118,6 @@ [[processors.converter]] [processors.converter.fields] float = ["*"] -# # Perform string processing on tags, fields, and measurements -#[[processors.rename]] - #[[processors.rename.replace]] - # measurement = "disk" - # dest = "nodes" -# [[processors.rename.replace]] -# field = "free" -# dest = "freeBytes" -# [[processors.rename.replace]] -# field = "used" -# dest = "usedBytes" -# [[processors.rename.replace]] -# field = "used_percent" -# dest = "usedPercentage" - #[[processors.rename.replace]] - # measurement = "net" - # dest = "nodes" - #[[processors.rename.replace]] - # field = "bytes_recv" - # dest = "networkBytesReceivedTotal" - #[[processors.rename.replace]] - # field = "bytes_sent" - # dest = "networkBytesSentTotal" - #[[processors.rename.replace]] - # field = "err_in" - # dest = "networkErrorsInTotal" - #[[processors.rename.replace]] - # field = "err_out" - # dest = "networkErrorsOutTotal" - #[[processors.rename.replace]] - # measurement = "kubernetes_pod_volume" - # dest = "pods" - #[[processors.rename.replace]] - # field = "used_bytes" - # dest = "podVolumeUsedBytes" - #[[processors.rename.replace]] - # field = "available_bytes" - # dest = "podVolumeAvailableBytes" - #[[processors.rename.replace]] - # measurement = "kubernetes_pod_network" - # dest = "pods" - #[[processors.rename.replace]] - # field = "tx_errors" - # dest = "podNetworkTxErrorsTotal" - #[[processors.rename.replace]] - # field = "rx_errors" - # dest = "podNetworkRxErrorsTotal" - 
#[[processors.rename.replace]] - # tag = "volume_name" - # dest = "volumeName" - #[[processors.rename.replace]] - # tag = "pod_name" - # dest = "podName" - #[[processors.rename.replace]] - # measurement = "docker" - # dest = "containers" - #[[processors.rename.replace]] - # measurement = "docker_container_status" - # dest = "containers" - #[[processors.rename.replace]] - # field = "n_containers" - # dest = "numContainers" - #[[processors.rename.replace]] - # field = "n_containers_running" - # dest = "numContainersRunning" - #[[processors.rename.replace]] - # field = "n_containers_stopped" - # dest = "numContainersStopped" - #[[processors.rename.replace]] - # field = "n_containers_paused" - # dest = "numContainersPaused" - #[[processors.rename.replace]] - # field = "n_images" - # dest = "numContainerImages" - -# ## Convert a tag value to uppercase -# # [[processors.strings.uppercase]] -# # tag = "method" -# -# ## Convert a field value to lowercase and store in a new field -# # [[processors.strings.lowercase]] -# # field = "uri_stem" -# # dest = "uri_stem_normalised" -# -# ## Trim leading and trailing whitespace using the default cutset -# # [[processors.strings.trim]] -# # field = "message" -# -# ## Trim leading characters in cutset -# # [[processors.strings.trim_left]] -# # field = "message" -# # cutset = "\t" -# -# ## Trim trailing characters in cutset -# # [[processors.strings.trim_right]] -# # field = "message" -# # cutset = "\r\n" -# -# ## Trim the given prefix from the field -# # [[processors.strings.trim_prefix]] -# # field = "my_value" -# # prefix = "my_" -# -# ## Trim the given suffix from the field -# # [[processors.strings.trim_suffix]] -# # field = "read_count" -# # suffix = "_count" - - -# # Print all metrics that pass through this filter. -# [[processors.topk]] -# ## How many seconds between aggregations -# # period = 10 -# -# ## How many top metrics to return -# # k = 10 -# -# ## Over which tags should the aggregation be done. 
Globs can be specified, in -# ## which case any tag matching the glob will aggregated over. If set to an -# ## empty list is no aggregation over tags is done -# # group_by = ['*'] -# -# ## Over which fields are the top k are calculated -# # fields = ["value"] -# -# ## What aggregation to use. Options: sum, mean, min, max -# # aggregation = "mean" -# -# ## Instead of the top k largest metrics, return the bottom k lowest metrics -# # bottomk = false -# -# ## The plugin assigns each metric a GroupBy tag generated from its name and -# ## tags. If this setting is different than "" the plugin will add a -# ## tag (which name will be the value of this setting) to each metric with -# ## the value of the calculated GroupBy tag. Useful for debugging -# # add_groupby_tag = "" -# -# ## These settings provide a way to know the position of each metric in -# ## the top k. The 'add_rank_field' setting allows to specify for which -# ## fields the position is required. If the list is non empty, then a field -# ## will be added to each and every metric for each string present in this -# ## setting. This field will contain the ranking of the group that -# ## the metric belonged to when aggregated over that field. -# ## The name of the field will be set to the name of the aggregation field, -# ## suffixed with the string '_topk_rank' -# # add_rank_fields = [] -# -# ## These settings provide a way to know what values the plugin is generating -# ## when aggregating metrics. The 'add_agregate_field' setting allows to -# ## specify for which fields the final aggregation value is required. If the -# ## list is non empty, then a field will be added to each every metric for -# ## each field present in this setting. This field will contain -# ## the computed aggregation for the group that the metric belonged to when -# ## aggregated over that field. 
-# ## The name of the field will be set to the name of the aggregation field, -# ## suffixed with the string '_topk_aggregate' -# # add_aggregate_fields = [] - - - -############################################################################### -# AGGREGATOR PLUGINS # -############################################################################### - -# # Keep the aggregate basicstats of each metric passing through. -# [[aggregators.basicstats]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false - - -# # Create aggregate histograms. -# [[aggregators.histogram]] -# ## The period in which to flush the aggregator. -# period = "30s" -# -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false -# -# ## Example config that aggregates all fields of the metric. -# # [[aggregators.histogram.config]] -# # ## The set of buckets. -# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] -# # ## The name of metric. -# # measurement_name = "cpu" -# -# ## Example config that aggregates only specific fields of the metric. -# # [[aggregators.histogram.config]] -# # ## The set of buckets. -# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] -# # ## The name of metric. -# # measurement_name = "diskio" -# # ## The concrete fields of metric -# # fields = ["io_time", "read_time", "write_time"] - - -# # Keep the aggregate min/max of each metric passing through. -# [[aggregators.minmax]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. 
-# drop_original = false - - -# # Count the occurance of values in fields. -# [[aggregators.valuecounter]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false -# ## The fields for which the values will be counted -# fields = [] - - - -############################################################################### -# INPUT PLUGINS # -############################################################################### - -# Read metrics about cpu usage -#[[inputs.cpu]] - ## Whether to report per-cpu stats or not -# percpu = false - ## Whether to report total system cpu stats or not -# totalcpu = true - ## If true, collect raw CPU time metrics. -# collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. -# report_active = true -# fieldpass = ["usage_active","cluster","node","host","device"] -# taginclude = ["cluster","cpu","node"] - - - -# Read metrics about disk usage by mount point -# [[inputs.disk]] -# name_prefix="container.azm.ms/" -# ## By default stats will be gathered for all mount points. -# ## Set mount_points will restrict the stats to only the specified mount points. -# # mount_points = ["/"] - -# ## Ignore mount points by filesystem type. -# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] -# fieldpass = ["free", "used", "used_percent"] -# taginclude = ["device","path","hostName"] -# # Below due to Bug - https://github.com/influxdata/telegraf/issues/5615 -# # ORDER matters here!! 
- i.e the below should be the LAST modifier -# [inputs.disk.tagdrop] -# path = ["/var/lib/kubelet*", "/dev/termination-log", "/var/log", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/etc/kubernetes/host", "/var/lib/docker/containers", "/etc/config/settings"] - - -# # Read metrics about memory usage -# #[[inputs.mem]] -# # fieldpass = ["used_percent", "cluster", "node","host","device"] -# # taginclude = ["cluster","node"] - -# # Read metrics about disk IO by device -# [[inputs.diskio]] -# name_prefix="container.azm.ms/" -# ## By default, telegraf will gather stats for all devices including -# ## disk partitions. -# ## Setting devices will restrict the stats to the specified devices. -# devices = ["sd[a-z][0-9]"] -# ## Uncomment the following line if you need disk serial numbers. -# # skip_serial_number = false -# # -# ## On systems which support it, device metadata can be added in the form of -# ## tags. -# ## Currently only Linux is supported via udev properties. You can view -# ## available properties for a device by running: -# ## 'udevadm info -q property -n /dev/sda' -# ## Note: Most, but not all, udev properties can be accessed this way. Properties -# ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. -# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] -# # -# ## Using the same metadata source as device_tags, you can also customize the -# ## name of the device via templates. -# ## The 'name_templates' parameter is a list of templates to try and apply to -# ## the device. The template may contain variables in the form of '$PROPERTY' or -# ## '${PROPERTY}'. The first template which does not contain any variables not -# ## present for the device is used as the device name tag. -# ## The typical use case is for LVM volumes, to get the VG/LV name instead of -# ## the near-meaningless DM-0 name. 
-# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] -# fieldpass = ["reads", "read_bytes", "read_time", "writes", "write_bytes", "write_time", "io_time", "iops_in_progress"] -# taginclude = ["name","hostName"] - -# # Read metrics about network interface usage -# [[inputs.net]] -# name_prefix="container.azm.ms/" -# ## By default, telegraf gathers stats from any up interface (excluding loopback) -# ## Setting interfaces will tell it to gather these explicit interfaces, -# ## regardless of status. -# ## -# # interfaces = ["eth0"] -# ## -# ## On linux systems telegraf also collects protocol stats. -# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. -# ## -# ignore_protocol_stats = true -# ## -# fieldpass = ["bytes_recv", "bytes_sent", "err_in", "err_out"] -# taginclude = ["interface","hostName"] - -# Read metrics from the kubernetes kubelet api -#[[inputs.kubernetes]] - ## URL for the kubelet - #url = "http://1.1.1.1:10255" -# url = "http://placeholder_nodeip:10255" - - ## Use bearer token for authorization - # bearer_token = /path/to/bearer/token - - ## Set response_timeout (default 5 seconds) - # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -# fieldpass = ["used_bytes", "available_bytes", "tx_errors", "rx_errors" ] -# taginclude = ["volume_name","nodeName","namespace","pod_name"] -# Read metrics about docker containers -#[[inputs.docker]] - ## Docker Endpoint - ## To use TCP, set endpoint = "tcp://[ip]:[port]" - ## To use environment variables (ie, docker-machine), set endpoint = "ENV" -# endpoint = "unix:///var/run/host/docker.sock" - - ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) -# gather_services = false - - ## Only collect metrics for these containers, collect all if empty -# container_names = [] - - ## 
Containers to include and exclude. Globs accepted. - ## Note that an empty array for both will include all containers -# container_name_include = [] -# container_name_exclude = [] - - ## Container states to include and exclude. Globs accepted. - ## When empty only containers in the "running" state will be captured. -# container_state_include = ['*'] - # container_state_exclude = [] - - ## Timeout for docker list, info, and stats commands -# timeout = "5s" - - ## Whether to report for each container per-device blkio (8:0, 8:1...) and - ## network (eth0, eth1, ...) stats or not -# perdevice = true - ## Whether to report for each container total blkio and network stats or not -# total = true - ## Which environment variables should we use as a tag - ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] - - ## docker labels to include and exclude as tags. Globs accepted. - ## Note that an empty array for both will include all labels as tags -# docker_label_include = [] -# docker_label_exclude = [] - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -# fieldpass = ["n_containers", "n_containers_running", "n_containers_stopped", "n_containers_paused", "n_images"] - #fieldpass = ["numContainers", "numContainersRunning", "numContainersStopped", "numContainersPaused", "numContainerImages"] -# taginclude = ["nodeName"] - -#[[inputs.procstat]] -# #name_prefix="t.azm.ms/" -# exe = "mdsd" -# interval = "10s" -# pid_finder = "native" -# pid_tag = true -# name_override = "agent_telemetry" -# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] -# [inputs.procstat.tags] -# Computer = "$NODE_NAME" -# AgentVersion = "$AGENT_VERSION" -# ControllerType = "$CONTROLLER_TYPE" -# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" -# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" -# Region = 
"$TELEMETRY_AKS_REGION" -# [[inputs.procstat]] -# #name_prefix="container.azm.ms/" -# exe = "ruby" -# interval = "10s" -# pid_finder = "native" -# pid_tag = true -# name_override = "agent_telemetry" -# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] -# [inputs.procstat.tags] -# Computer = "$NODE_NAME" -# AgentVersion = "$AGENT_VERSION" -# ControllerType = "$CONTROLLER_TYPE" -# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" -# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" -# Region = "$TELEMETRY_AKS_REGION" -# [[inputs.procstat]] -# #name_prefix="container.azm.ms/" -# exe = "td-agent-bit" -# interval = "10s" -# pid_finder = "native" -# pid_tag = true -# name_override = "agent_telemetry" -# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] -# [inputs.procstat.tags] -# Computer = "$NODE_NAME" -# AgentVersion = "$AGENT_VERSION" -# ControllerType = "$CONTROLLER_TYPE" -# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" -# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" -# Region = "$TELEMETRY_AKS_REGION" -# [[inputs.procstat]] -# #name_prefix="container.azm.ms/" -# exe = "telegraf" -# interval = "10s" -# pid_finder = "native" -# pid_tag = true -# name_override = "agent_telemetry" -# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] -# [inputs.procstat.tags] -# Computer = "$NODE_NAME" -# AgentVersion = "$AGENT_VERSION" -# ControllerType = "$CONTROLLER_TYPE" -# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" -# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" -# Region = "$TELEMETRY_AKS_REGION" - -#kubelet-1 -# [[inputs.prometheus]] -# name_prefix="container.azm.ms/" -# ## An array of urls to scrape metrics from. 
-# urls = ["$CADVISOR_METRICS_URL"] -# fieldpass = ["$KUBELET_RUNTIME_OPERATIONS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC"] - -# metric_version = 2 -# url_tag = "scrapeUrl" - -# ## An array of Kubernetes services to scrape metrics from. -# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - -# ## Kubernetes config file to create client from. -# # kube_config = "/path/to/kubernetes.config" - -# ## Scrape Kubernetes pods for the following prometheus annotations: -# ## - prometheus.io/scrape: Enable scraping for this pod -# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to -# ## set this to `https` & most likely set the tls config. -# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. -# ## - prometheus.io/port: If port is not 9102 use this annotation -# # monitor_kubernetes_pods = true - -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" -# ## OR -# # bearer_token_string = "abc_123" - -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# response_timeout = "15s" - -# ## Optional TLS Config -# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -# #tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# insecure_skip_verify = true -# #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -# [inputs.prometheus.tagpass] -# operation_type = ["create_container", "remove_container", "pull_image"] - -# #kubelet-2 -# [[inputs.prometheus]] -# name_prefix="container.azm.ms/" -# ## An array of urls to scrape metrics from. 
-# urls = ["$CADVISOR_METRICS_URL"] - -# fieldpass = ["kubelet_running_pod_count","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] - -# metric_version = 2 -# url_tag = "scrapeUrl" - - -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# response_timeout = "15s" - -# ## Optional TLS Config -# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -# insecure_skip_verify = true - #Prometheus Custom Metrics [[inputs.prometheus]] @@ -690,42 +153,10 @@ ## Optional TLS Config tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" - #tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile ## Use TLS but skip chain & host verification insecure_skip_verify = true - #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] $AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER ## OSM Prometheus configuration $AZMON_TELEGRAF_OSM_PROM_PLUGINS - -# [[inputs.exec]] -# ## Commands array -# interval = "15m" -# commands = [ -# "/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh" -# ] - -# ## Timeout for each command to complete. -# timeout = "15s" - -# ## measurement name suffix (for separating different commands) -# name_suffix = "_telemetry" - -# ## Data format to consume. 
-# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" -# tagexclude = ["hostName"] -# [inputs.exec.tags] -# AgentVersion = "$AGENT_VERSION" -# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" -# ACS_RESOURCE_NAME = "$TELEMETRY_ACS_RESOURCE_NAME" -# Region = "$TELEMETRY_AKS_REGION" -# ClusterName = "$TELEMETRY_CLUSTER_NAME" -# ClusterType = "$TELEMETRY_CLUSTER_TYPE" -# Computer = "placeholder_hostname" -# ControllerType = "$CONTROLLER_TYPE" \ No newline at end of file diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index f534bb992..ee1cf8819 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -73,11 +73,11 @@ ## Logging configuration: ## Run telegraf with debug log messages. - debug = true + debug = false ## Run telegraf in quiet mode (error log messages only). - quiet = false + quiet = true ## Specify the log file name. The empty string means to log to stderr. 
- logfile = "/opt/telegraf-logs-debug.txt" + logfile = "" ## Override default hostname, if empty use os.Hostname() #hostname = "placeholder_hostname" diff --git a/kubernetes/container-azm-ms-osmconfig.yaml b/kubernetes/container-azm-ms-osmconfig.yaml index 55ef45f07..05b7ac3ed 100644 --- a/kubernetes/container-azm-ms-osmconfig.yaml +++ b/kubernetes/container-azm-ms-osmconfig.yaml @@ -11,7 +11,7 @@ data: # OSM metric collection settings [osm_metric_collection_configuration.settings] # Namespaces to monitor - monitor_namespaces = ["namespace1", "namespace2"] + # monitor_namespaces = ["namespace1", "namespace2"] metadata: name: container-azm-ms-osmconfig namespace: kube-system diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 4fedbc776..71e46875b 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -3,7 +3,7 @@ if [ -e "/etc/config/kube.conf" ]; then cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf elif [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then - echo "rashmi-in-ds-prom-omsagent-conf" + echo "setting omsagent conf file for prometheus sidecar" cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf # omsadmin.sh replaces %MONITOR_AGENT_PORT% and %SYSLOG_PORT% in the monitor.conf and syslog.conf with default ports 25324 and 25224. 
# Since we are running 2 omsagents in the same pod, we need to use a different port for the sidecar, @@ -12,7 +12,7 @@ elif [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then sed -i -e 's/port %MONITOR_AGENT_PORT%/port 25326/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf sed -i -e 's/port %SYSLOG_PORT%/port 25226/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf else - echo "rashmi-in-ds-omsagent-conf" + echo "setting omsagent conf file for daemonset" sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf fi sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf @@ -622,11 +622,11 @@ fi #telegraf & fluentbit requirements if [ ! -e "/etc/config/kube.conf" ]; then if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then - echo "in side car................" + echo "starting fluent-bit and setting telegraf conf file for prometheus sidecar" /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" else - echo "in ds................" + echo "starting fluent-bit and setting telegraf conf file for daemonset" if [ "$CONTAINER_RUNTIME" == "docker" ]; then /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" @@ -638,7 +638,7 @@ if [ ! -e "/etc/config/kube.conf" ]; then fi fi else - echo "in rs..............." 
+ echo "starting fluent-bit and setting telegraf conf file for replicaset" /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" fi From 045b586661c9c4ee8f102b2bf2918f286d490d31 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 25 Mar 2021 12:27:10 -0700 Subject: [PATCH 174/175] removing commented lines --- build/windows/installer/conf/telegraf.conf | 605 +-------------------- 1 file changed, 3 insertions(+), 602 deletions(-) diff --git a/build/windows/installer/conf/telegraf.conf b/build/windows/installer/conf/telegraf.conf index 52e16f98c..5f4d2364e 100644 --- a/build/windows/installer/conf/telegraf.conf +++ b/build/windows/installer/conf/telegraf.conf @@ -16,16 +16,6 @@ # Global tags can be specified here in key="value" format. [global_tags] - #Below are entirely used for telemetry - #AgentVersion = "$AGENT_VERSION" - #AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" - #ACS_RESOURCE_NAME = "$TELEMETRY_ACS_RESOURCE_NAME" - #Region = "$TELEMETRY_AKS_REGION" - #ClusterName = "$TELEMETRY_CLUSTER_NAME" - #ClusterType = "$TELEMETRY_CLUSTER_TYPE" - #Computer = "placeholder_hostname" - #ControllerType = "$CONTROLLER_TYPE" - hostName = "placeholder_hostname" @@ -73,11 +63,11 @@ ## Logging configuration: ## Run telegraf with debug log messages. - debug = true + debug = false ## Run telegraf in quiet mode (error log messages only). - quiet = false + quiet = true ## Specify the log file name. The empty string means to log to stderr. - logfile = "/opt/new-telegraf-logs.txt" + logfile = "" ## Override default hostname, if empty use os.Hostname() #hostname = "placeholder_hostname" ## If set to true, do no set the "host" tag in the telegraf agent. 
@@ -123,61 +113,6 @@ namedrop = ["agent_telemetry"] #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] -# # Output to send MDM metrics to fluent bit and then route it to fluentD -# [[outputs.socket_writer]] -# ## URL to connect to -# address = "tcp://0.0.0.0:25228" -# # address = "tcp://example.com:http" -# # address = "tcp4://127.0.0.1:8094" -# # address = "tcp6://127.0.0.1:8094" -# # address = "tcp6://[2001:db8::1]:8094" -# # address = "udp://127.0.0.1:8094" -# # address = "udp4://127.0.0.1:8094" -# # address = "udp6://127.0.0.1:8094" -# # address = "unix:///tmp/telegraf.sock" -# # address = "unixgram:///tmp/telegraf.sock" - -# ## Optional TLS Config -# # tls_ca = "/etc/telegraf/ca.pem" -# # tls_cert = "/etc/telegraf/cert.pem" -# # tls_key = "/etc/telegraf/key.pem" -# ## Use TLS but skip chain & host verification -# # insecure_skip_verify = false - -# ## Period between keep alive probes. -# ## Only applies to TCP sockets. -# ## 0 disables keep alive probes. -# ## Defaults to the OS configuration. -# # keep_alive_period = "5m" - -# ## Data format to generate. -# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "json" -# namepass = ["container.azm.ms/disk"] -# #fieldpass = ["used_percent"] - -[[outputs.application_insights]] - ## Instrumentation key of the Application Insights resource. - instrumentation_key = "$TELEMETRY_APPLICATIONINSIGHTS_KEY" - - ## Timeout for closing (default: 5s). - # timeout = "5s" - - ## Enable additional diagnostic logging. - # enable_diagnostic_logging = false - - ## Context Tag Sources add Application Insights context tags to a tag value. 
- ## - ## For list of allowed context tag keys see: - ## https://github.com/Microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/contexttagkeys.go - # [outputs.application_insights.context_tag_sources] - # "ai.cloud.role" = "kubernetes_container_name" - # "ai.cloud.roleInstance" = "kubernetes_pod_name" - namepass = ["agent_telemetry"] - #tagdrop = ["nodeName"] - ############################################################################### # PROCESSOR PLUGINS # ############################################################################### @@ -185,510 +120,6 @@ [[processors.converter]] [processors.converter.fields] float = ["*"] -# # Perform string processing on tags, fields, and measurements -#[[processors.rename]] - #[[processors.rename.replace]] - # measurement = "disk" - # dest = "nodes" -# [[processors.rename.replace]] -# field = "free" -# dest = "freeBytes" -# [[processors.rename.replace]] -# field = "used" -# dest = "usedBytes" -# [[processors.rename.replace]] -# field = "used_percent" -# dest = "usedPercentage" - #[[processors.rename.replace]] - # measurement = "net" - # dest = "nodes" - #[[processors.rename.replace]] - # field = "bytes_recv" - # dest = "networkBytesReceivedTotal" - #[[processors.rename.replace]] - # field = "bytes_sent" - # dest = "networkBytesSentTotal" - #[[processors.rename.replace]] - # field = "err_in" - # dest = "networkErrorsInTotal" - #[[processors.rename.replace]] - # field = "err_out" - # dest = "networkErrorsOutTotal" - #[[processors.rename.replace]] - # measurement = "kubernetes_pod_volume" - # dest = "pods" - #[[processors.rename.replace]] - # field = "used_bytes" - # dest = "podVolumeUsedBytes" - #[[processors.rename.replace]] - # field = "available_bytes" - # dest = "podVolumeAvailableBytes" - #[[processors.rename.replace]] - # measurement = "kubernetes_pod_network" - # dest = "pods" - #[[processors.rename.replace]] - # field = "tx_errors" - # dest = "podNetworkTxErrorsTotal" - #[[processors.rename.replace]] - 
# field = "rx_errors" - # dest = "podNetworkRxErrorsTotal" - #[[processors.rename.replace]] - # tag = "volume_name" - # dest = "volumeName" - #[[processors.rename.replace]] - # tag = "pod_name" - # dest = "podName" - #[[processors.rename.replace]] - # measurement = "docker" - # dest = "containers" - #[[processors.rename.replace]] - # measurement = "docker_container_status" - # dest = "containers" - #[[processors.rename.replace]] - # field = "n_containers" - # dest = "numContainers" - #[[processors.rename.replace]] - # field = "n_containers_running" - # dest = "numContainersRunning" - #[[processors.rename.replace]] - # field = "n_containers_stopped" - # dest = "numContainersStopped" - #[[processors.rename.replace]] - # field = "n_containers_paused" - # dest = "numContainersPaused" - #[[processors.rename.replace]] - # field = "n_images" - # dest = "numContainerImages" - -# ## Convert a tag value to uppercase -# # [[processors.strings.uppercase]] -# # tag = "method" -# -# ## Convert a field value to lowercase and store in a new field -# # [[processors.strings.lowercase]] -# # field = "uri_stem" -# # dest = "uri_stem_normalised" -# -# ## Trim leading and trailing whitespace using the default cutset -# # [[processors.strings.trim]] -# # field = "message" -# -# ## Trim leading characters in cutset -# # [[processors.strings.trim_left]] -# # field = "message" -# # cutset = "\t" -# -# ## Trim trailing characters in cutset -# # [[processors.strings.trim_right]] -# # field = "message" -# # cutset = "\r\n" -# -# ## Trim the given prefix from the field -# # [[processors.strings.trim_prefix]] -# # field = "my_value" -# # prefix = "my_" -# -# ## Trim the given suffix from the field -# # [[processors.strings.trim_suffix]] -# # field = "read_count" -# # suffix = "_count" - - -# # Print all metrics that pass through this filter. 
-# [[processors.topk]] -# ## How many seconds between aggregations -# # period = 10 -# -# ## How many top metrics to return -# # k = 10 -# -# ## Over which tags should the aggregation be done. Globs can be specified, in -# ## which case any tag matching the glob will aggregated over. If set to an -# ## empty list is no aggregation over tags is done -# # group_by = ['*'] -# -# ## Over which fields are the top k are calculated -# # fields = ["value"] -# -# ## What aggregation to use. Options: sum, mean, min, max -# # aggregation = "mean" -# -# ## Instead of the top k largest metrics, return the bottom k lowest metrics -# # bottomk = false -# -# ## The plugin assigns each metric a GroupBy tag generated from its name and -# ## tags. If this setting is different than "" the plugin will add a -# ## tag (which name will be the value of this setting) to each metric with -# ## the value of the calculated GroupBy tag. Useful for debugging -# # add_groupby_tag = "" -# -# ## These settings provide a way to know the position of each metric in -# ## the top k. The 'add_rank_field' setting allows to specify for which -# ## fields the position is required. If the list is non empty, then a field -# ## will be added to each and every metric for each string present in this -# ## setting. This field will contain the ranking of the group that -# ## the metric belonged to when aggregated over that field. -# ## The name of the field will be set to the name of the aggregation field, -# ## suffixed with the string '_topk_rank' -# # add_rank_fields = [] -# -# ## These settings provide a way to know what values the plugin is generating -# ## when aggregating metrics. The 'add_agregate_field' setting allows to -# ## specify for which fields the final aggregation value is required. If the -# ## list is non empty, then a field will be added to each every metric for -# ## each field present in this setting. 
This field will contain -# ## the computed aggregation for the group that the metric belonged to when -# ## aggregated over that field. -# ## The name of the field will be set to the name of the aggregation field, -# ## suffixed with the string '_topk_aggregate' -# # add_aggregate_fields = [] - - - -############################################################################### -# AGGREGATOR PLUGINS # -############################################################################### - -# # Keep the aggregate basicstats of each metric passing through. -# [[aggregators.basicstats]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false - - -# # Create aggregate histograms. -# [[aggregators.histogram]] -# ## The period in which to flush the aggregator. -# period = "30s" -# -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false -# -# ## Example config that aggregates all fields of the metric. -# # [[aggregators.histogram.config]] -# # ## The set of buckets. -# # buckets = [0.0, 15.6, 34.5, 49.1, 71.5, 80.5, 94.5, 100.0] -# # ## The name of metric. -# # measurement_name = "cpu" -# -# ## Example config that aggregates only specific fields of the metric. -# # [[aggregators.histogram.config]] -# # ## The set of buckets. -# # buckets = [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] -# # ## The name of metric. -# # measurement_name = "diskio" -# # ## The concrete fields of metric -# # fields = ["io_time", "read_time", "write_time"] - - -# # Keep the aggregate min/max of each metric passing through. -# [[aggregators.minmax]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. 
-# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false - - -# # Count the occurance of values in fields. -# [[aggregators.valuecounter]] -# ## General Aggregator Arguments: -# ## The period on which to flush & clear the aggregator. -# period = "30s" -# ## If true, the original metric will be dropped by the -# ## aggregator and will not get sent to the output plugins. -# drop_original = false -# ## The fields for which the values will be counted -# fields = [] - - - -############################################################################### -# INPUT PLUGINS # -############################################################################### - -# Read metrics about cpu usage -#[[inputs.cpu]] - ## Whether to report per-cpu stats or not -# percpu = false - ## Whether to report total system cpu stats or not -# totalcpu = true - ## If true, collect raw CPU time metrics. -# collect_cpu_time = false - ## If true, compute and report the sum of all non-idle CPU states. -# report_active = true -# fieldpass = ["usage_active","cluster","node","host","device"] -# taginclude = ["cluster","cpu","node"] - - - -# Read metrics about disk usage by mount point -# [[inputs.disk]] -# name_prefix="container.azm.ms/" -# ## By default stats will be gathered for all mount points. -# ## Set mount_points will restrict the stats to only the specified mount points. -# # mount_points = ["/"] - -# ## Ignore mount points by filesystem type. -# ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] -# fieldpass = ["free", "used", "used_percent"] -# taginclude = ["device","path","hostName"] -# # Below due to Bug - https://github.com/influxdata/telegraf/issues/5615 -# # ORDER matters here!! 
- i.e the below should be the LAST modifier -# [inputs.disk.tagdrop] -# path = ["/var/lib/kubelet*", "/dev/termination-log", "/var/log", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/etc/kubernetes/host", "/var/lib/docker/containers", "/etc/config/settings"] - - -# # Read metrics about memory usage -# #[[inputs.mem]] -# # fieldpass = ["used_percent", "cluster", "node","host","device"] -# # taginclude = ["cluster","node"] - -# # Read metrics about disk IO by device -# [[inputs.diskio]] -# name_prefix="container.azm.ms/" -# ## By default, telegraf will gather stats for all devices including -# ## disk partitions. -# ## Setting devices will restrict the stats to the specified devices. -# devices = ["sd[a-z][0-9]"] -# ## Uncomment the following line if you need disk serial numbers. -# # skip_serial_number = false -# # -# ## On systems which support it, device metadata can be added in the form of -# ## tags. -# ## Currently only Linux is supported via udev properties. You can view -# ## available properties for a device by running: -# ## 'udevadm info -q property -n /dev/sda' -# ## Note: Most, but not all, udev properties can be accessed this way. Properties -# ## that are currently inaccessible include DEVTYPE, DEVNAME, and DEVPATH. -# # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] -# # -# ## Using the same metadata source as device_tags, you can also customize the -# ## name of the device via templates. -# ## The 'name_templates' parameter is a list of templates to try and apply to -# ## the device. The template may contain variables in the form of '$PROPERTY' or -# ## '${PROPERTY}'. The first template which does not contain any variables not -# ## present for the device is used as the device name tag. -# ## The typical use case is for LVM volumes, to get the VG/LV name instead of -# ## the near-meaningless DM-0 name. 
-# # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] -# fieldpass = ["reads", "read_bytes", "read_time", "writes", "write_bytes", "write_time", "io_time", "iops_in_progress"] -# taginclude = ["name","hostName"] - -# # Read metrics about network interface usage -# [[inputs.net]] -# name_prefix="container.azm.ms/" -# ## By default, telegraf gathers stats from any up interface (excluding loopback) -# ## Setting interfaces will tell it to gather these explicit interfaces, -# ## regardless of status. -# ## -# # interfaces = ["eth0"] -# ## -# ## On linux systems telegraf also collects protocol stats. -# ## Setting ignore_protocol_stats to true will skip reporting of protocol metrics. -# ## -# ignore_protocol_stats = true -# ## -# fieldpass = ["bytes_recv", "bytes_sent", "err_in", "err_out"] -# taginclude = ["interface","hostName"] - -# Read metrics from the kubernetes kubelet api -#[[inputs.kubernetes]] - ## URL for the kubelet - #url = "http://1.1.1.1:10255" -# url = "http://placeholder_nodeip:10255" - - ## Use bearer token for authorization - # bearer_token = /path/to/bearer/token - - ## Set response_timeout (default 5 seconds) - # response_timeout = "5s" - - ## Optional TLS Config - # tls_ca = /path/to/cafile - # tls_cert = /path/to/certfile - # tls_key = /path/to/keyfile - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -# fieldpass = ["used_bytes", "available_bytes", "tx_errors", "rx_errors" ] -# taginclude = ["volume_name","nodeName","namespace","pod_name"] -# Read metrics about docker containers -#[[inputs.docker]] - ## Docker Endpoint - ## To use TCP, set endpoint = "tcp://[ip]:[port]" - ## To use environment variables (ie, docker-machine), set endpoint = "ENV" -# endpoint = "unix:///var/run/host/docker.sock" - - ## Set to true to collect Swarm metrics(desired_replicas, running_replicas) -# gather_services = false - - ## Only collect metrics for these containers, collect all if empty -# container_names = [] - - ## 
Containers to include and exclude. Globs accepted. - ## Note that an empty array for both will include all containers -# container_name_include = [] -# container_name_exclude = [] - - ## Container states to include and exclude. Globs accepted. - ## When empty only containers in the "running" state will be captured. -# container_state_include = ['*'] - # container_state_exclude = [] - - ## Timeout for docker list, info, and stats commands -# timeout = "5s" - - ## Whether to report for each container per-device blkio (8:0, 8:1...) and - ## network (eth0, eth1, ...) stats or not -# perdevice = true - ## Whether to report for each container total blkio and network stats or not -# total = true - ## Which environment variables should we use as a tag - ##tag_env = ["JAVA_HOME", "HEAP_SIZE"] - - ## docker labels to include and exclude as tags. Globs accepted. - ## Note that an empty array for both will include all labels as tags -# docker_label_include = [] -# docker_label_exclude = [] - - ## Optional TLS Config - # tls_ca = "/etc/telegraf/ca.pem" - # tls_cert = "/etc/telegraf/cert.pem" - # tls_key = "/etc/telegraf/key.pem" - ## Use TLS but skip chain & host verification - # insecure_skip_verify = false -# fieldpass = ["n_containers", "n_containers_running", "n_containers_stopped", "n_containers_paused", "n_images"] - #fieldpass = ["numContainers", "numContainersRunning", "numContainersStopped", "numContainersPaused", "numContainerImages"] -# taginclude = ["nodeName"] - -#[[inputs.procstat]] -# #name_prefix="t.azm.ms/" -# exe = "mdsd" -# interval = "10s" -# pid_finder = "native" -# pid_tag = true -# name_override = "agent_telemetry" -# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] -# [inputs.procstat.tags] -# Computer = "$NODE_NAME" -# AgentVersion = "$AGENT_VERSION" -# ControllerType = "$CONTROLLER_TYPE" -# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" -# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" -# Region = 
"$TELEMETRY_AKS_REGION" -# [[inputs.procstat]] -# #name_prefix="container.azm.ms/" -# exe = "ruby" -# interval = "10s" -# pid_finder = "native" -# pid_tag = true -# name_override = "agent_telemetry" -# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] -# [inputs.procstat.tags] -# Computer = "$NODE_NAME" -# AgentVersion = "$AGENT_VERSION" -# ControllerType = "$CONTROLLER_TYPE" -# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" -# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" -# Region = "$TELEMETRY_AKS_REGION" -# [[inputs.procstat]] -# #name_prefix="container.azm.ms/" -# exe = "td-agent-bit" -# interval = "10s" -# pid_finder = "native" -# pid_tag = true -# name_override = "agent_telemetry" -# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] -# [inputs.procstat.tags] -# Computer = "$NODE_NAME" -# AgentVersion = "$AGENT_VERSION" -# ControllerType = "$CONTROLLER_TYPE" -# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" -# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" -# Region = "$TELEMETRY_AKS_REGION" -# [[inputs.procstat]] -# #name_prefix="container.azm.ms/" -# exe = "telegraf" -# interval = "10s" -# pid_finder = "native" -# pid_tag = true -# name_override = "agent_telemetry" -# fieldpass = ["cpu_usage", "memory_rss", "memory_swap", "memory_vms", "memory_stack"] -# [inputs.procstat.tags] -# Computer = "$NODE_NAME" -# AgentVersion = "$AGENT_VERSION" -# ControllerType = "$CONTROLLER_TYPE" -# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" -# ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" -# Region = "$TELEMETRY_AKS_REGION" - -#kubelet-1 -# [[inputs.prometheus]] -# name_prefix="container.azm.ms/" -# ## An array of urls to scrape metrics from. 
-# urls = ["$CADVISOR_METRICS_URL"] -# fieldpass = ["$KUBELET_RUNTIME_OPERATIONS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC"] - -# metric_version = 2 -# url_tag = "scrapeUrl" - -# ## An array of Kubernetes services to scrape metrics from. -# # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] - -# ## Kubernetes config file to create client from. -# # kube_config = "/path/to/kubernetes.config" - -# ## Scrape Kubernetes pods for the following prometheus annotations: -# ## - prometheus.io/scrape: Enable scraping for this pod -# ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to -# ## set this to `https` & most likely set the tls config. -# ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. -# ## - prometheus.io/port: If port is not 9102 use this annotation -# # monitor_kubernetes_pods = true - -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" -# ## OR -# # bearer_token_string = "abc_123" - -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# response_timeout = "15s" - -# ## Optional TLS Config -# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -# #tls_cert = /path/to/certfile -# # tls_key = /path/to/keyfile -# ## Use TLS but skip chain & host verification -# insecure_skip_verify = true -# #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -# [inputs.prometheus.tagpass] -# operation_type = ["create_container", "remove_container", "pull_image"] - -# #kubelet-2 -# [[inputs.prometheus]] -# name_prefix="container.azm.ms/" -# ## An array of urls to scrape metrics from. 
-# urls = ["$CADVISOR_METRICS_URL"] - -# fieldpass = ["kubelet_running_pod_count","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] - -# metric_version = 2 -# url_tag = "scrapeUrl" - - -# ## Use bearer token for authorization. ('bearer_token' takes priority) -# bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" -# ## Specify timeout duration for slower prometheus clients (default is 3s) -# response_timeout = "15s" - -# ## Optional TLS Config -# tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -# insecure_skip_verify = true #Prometheus Custom Metrics [[inputs.prometheus]] @@ -727,35 +158,5 @@ # tls_key = /path/to/keyfile ## Use TLS but skip chain & host verification insecure_skip_verify = true - #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] $AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER - -# [[inputs.exec]] -# ## Commands array -# interval = "15m" -# commands = [ -# "/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh" -# ] - -# ## Timeout for each command to complete. -# timeout = "15s" - -# ## measurement name suffix (for separating different commands) -# name_suffix = "_telemetry" - -# ## Data format to consume. 
-# ## Each data format has its own unique set of configuration options, read -# ## more about them here: -# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md -# data_format = "influx" -# tagexclude = ["hostName"] -# [inputs.exec.tags] -# AgentVersion = "$AGENT_VERSION" -# AKS_RESOURCE_ID = "$TELEMETRY_AKS_RESOURCE_ID" -# ACS_RESOURCE_NAME = "$TELEMETRY_ACS_RESOURCE_NAME" -# Region = "$TELEMETRY_AKS_REGION" -# ClusterName = "$TELEMETRY_CLUSTER_NAME" -# ClusterType = "$TELEMETRY_CLUSTER_TYPE" -# Computer = "placeholder_hostname" -# ControllerType = "$CONTROLLER_TYPE" \ No newline at end of file From e26a3d4cc08aefc6faa8752c41b5491eff403212 Mon Sep 17 00:00:00 2001 From: Rashmi Mysore Chandrashekar Date: Thu, 25 Mar 2021 13:40:51 -0700 Subject: [PATCH 175/175] fixing PR comments --- .../templates/omsagent-daemonset-windows.yaml | 6 ++++++ .../templates/omsagent-deployment.yaml | 11 ++++++++++- kubernetes/omsagent.yaml | 2 -- kubernetes/windows/main.ps1 | 2 +- 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 82d210f3d..8868b86bb 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -81,6 +81,12 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + - name: PODNAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SIDECAR_SCRAPING_ENABLED + value: "false" volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 012dd2720..9b6656e9c 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ 
b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -72,7 +72,9 @@ spec: value: {{ .Values.Azure.Extension.Name | quote }} {{- end }} - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "" + - name: SIDECAR_SCRAPING_ENABLED + value: "false" - name: ISTEST value: {{ .Values.omsagent.ISTEST | quote }} securityContext: @@ -109,6 +111,9 @@ spec: - mountPath: /etc/config/settings/adx name: omsagent-adx-secret readOnly: true + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config + readOnly: true livenessProbe: exec: command: @@ -158,4 +163,8 @@ spec: secret: secretName: omsagent-adx-secret optional: true + - name: osm-settings-vol-config + configMap: + name: container-azm-ms-osmconfig + optional: true {{- end }} diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 1a0b3de80..c25b9bfd4 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -646,7 +646,6 @@ spec: readOnly: true - mountPath: /etc/config/settings/adx name: omsagent-adx-secret - # only in sidecar scraping mode - mountPath: /etc/config/osm-settings name: osm-settings-vol-config readOnly: true @@ -721,7 +720,6 @@ spec: secret: secretName: omsagent-adx-secret optional: true -# only in sidecar scraping mode - name: osm-settings-vol-config configMap: name: container-azm-ms-osmconfig diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 2901cbfb0..95cba2579 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -275,7 +275,7 @@ function Get-ContainerRuntime { function Start-Fluent-Telegraf { - # Run fluent-bit service first so that we do not miss any logs being forwarded by the fluentd service. + # Run fluent-bit service first so that we do not miss any logs being forwarded by the fluentd service and telegraf service. # Run fluent-bit as a background job. 
Switch this to a windows service once fluent-bit supports natively running as a windows service Start-Job -ScriptBlock { Start-Process -NoNewWindow -FilePath "C:\opt\fluent-bit\bin\fluent-bit.exe" -ArgumentList @("-c", "C:\etc\fluent-bit\fluent-bit.conf", "-e", "C:\opt\omsagentwindows\out_oms.so") }