From 7c6048e4f72d7ccce3831e2637049d196b980f95 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Tue, 4 Aug 2020 17:18:26 -0700 Subject: [PATCH 01/18] separate build yamls for ci_prod branch (#415) (#416) --- ...l.all_tag.all_phase.all_config.ci_prod.yml | 44 +++++++++++++++ ...l.all_tag.all_phase.all_config.ci_prod.yml | 55 +++++++++++++++++++ 2 files changed, 99 insertions(+) create mode 100644 .pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml create mode 100644 .pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml diff --git a/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml new file mode 100644 index 000000000..d47a60ffe --- /dev/null +++ b/.pipelines/pipeline.user.linux.official.all_tag.all_phase.all_config.ci_prod.yml @@ -0,0 +1,44 @@ +environment: + host: + os: 'linux' + flavor: 'ubuntu' + version: '16.04' + runtime: + provider: 'appcontainer' + image: 'cdpxlinux.azurecr.io/user/azure-monitor/container-insights:1.0' + +version: + name: 'DockerProvider' + major: 10 + minor: 0 + tag: 'beta' + system: 'custom' + exclude_commit: true + +restore: + commands: + - !!defaultcommand + name: 'get go modules' + command: '.pipelines/restore-linux.sh' + fail_on_stderr: false + +build: + commands: + - !!defaultcommand + name: 'Build Docker Provider Shell Bundle' + command: '.pipelines/build-linux.sh' + fail_on_stderr: false + +package: + commands: + - !!dockerbuildcommand # REQUIRED: This maps the command data to a concrete type in the CDPX orchestrator. + name: 'Build Docker Image' # REQUIRED: All commands have a name field. All console output captured when + # this command runs is tagged with the value of this field. + context_folder: 'kubernetes/linux' # REQUIRED: The repository root relative path of the folder containing the Dockerfile to build. 
+ # In effect, the context folder will be repository_checkout_folder/src/DockerFinal. + dockerfile_name: 'Dockerfile' # OPTIONAL: The name of the dockerfile. Docker client does allow the Dockerfile + # to be named differently. Defaults to Dockerfile. + # In effect, the -f option value passed to docker build will be repository_checkout_folder/src/DockerFinal/Foo.dockerfile. + repository_name: 'cdpxlinux' # only supported ones are cdpx acr repos + tag: 'ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. + latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. diff --git a/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml new file mode 100644 index 000000000..e0286fbd6 --- /dev/null +++ b/.pipelines/pipeline.user.windows.official.all_tag.all_phase.all_config.ci_prod.yml @@ -0,0 +1,55 @@ +environment: + host: + os: 'windows' + flavor: 'server' + version: '2019' + runtime: + provider: 'appcontainer' + image: 'cdpxwin1809.azurecr.io/user/azure-monitor/container-insights:6.0' + source_mode: 'map' + +version: + name: 'Certificate Generator and Out OMS plugin' + major: 10 + minor: 0 + tag: 'beta' + system: 'custom' + exclude_commit: true + +signing_options: + profile: 'azure' + codesign_validation_glob_pattern: 'regex|.+(?:dll|exe|sys|ps1|psm1|ps1xml|psc1|psd1|cdxml|vbs|js|wsf)$;-:file|**\linux\**' #CSV does not currently support binaries built for linux, so we exclude this folder + +static_analysis_options: + binskim_options: + files_to_scan: + - from: 'build\windows\installer\certificategenerator\bin\' + exclude: # exclude binaries which are referenced via dotnet packages and not built by us + - '**/**/**/BouncyCastle.Crypto.dll' + - '**/**/**/**/BouncyCastle.Crypto.dll' +restore: + commands: 
+ - !!defaultcommand + name: 'Restore dotnet packages' + command: '.pipelines/restore-windows.cmd' + +build: + commands: + - !!defaultcommand + name: 'Build Certificate Generator Source code and Out OMS Go plugin code' + command: '.pipelines/build-windows.cmd' + fail_on_stderr: false + +package: + commands: + - !!dockerbuildcommand # REQUIRED: This maps the command data to a concrete type in the CDPX orchestrator. + name: 'Build Docker Image' # REQUIRED: All commands have a name field. All console output captured when + # this command runs is tagged with the value of this field. + context_folder: 'kubernetes/windows' # REQUIRED: The repository root relative path of the folder containing the Dockerfile to build. + # In effect, the context folder will be repository_checkout_folder/src/DockerFinal. + dockerfile_name: 'Dockerfile' # OPTIONAL: The name of the dockerfile. Docker client does allow the Dockerfile + # to be named differently. Defaults to Dockerfile. + # In effect, the -f option value passed to docker build will be repository_checkout_folder/src/DockerFinal/Foo.dockerfile. + repository_name: 'cdpxwin1809' # only supported ones are cdpx acr repos + tag: 'win-ciprod' # OPTIONAL: Defaults to latest. The tag for the built image. Final tag will be 1.0.0alpha, 1.0.0-timestamp-commitID. + latest: false # OPTIONAL: Defaults to false. If tag is not set to latest and this flag is set, then tag as latest as well and push latest as well. 
From e347bb3f58f101f97048f584cd2eba6c8cf954e5 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Fri, 7 Aug 2020 16:14:56 -0700 Subject: [PATCH 02/18] [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar --- README.md | 4 +- ReleaseNotes.md | 16 ++++ ReleaseProcess.md | 16 ++-- build/version | 4 +- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 12 +-- kubernetes/windows/Dockerfile | 31 ++++++- kubernetes/windows/baseimage/Dockerfile | 28 ------- source/plugins/go/src/oms.go | 4 +- source/plugins/ruby/podinventory_to_mdm.rb | 98 +++++++++++----------- 12 files changed, 123 insertions(+), 100 deletions(-) delete mode 100644 kubernetes/windows/baseimage/Dockerfile diff --git a/README.md b/README.md index 06d3606c0..659fe0161 100644 --- a/README.md +++ b/README.md @@ -213,7 +213,7 @@ powershell -ExecutionPolicy bypass # switch to powershell if you are not on pow # Azure DevOps Build Pipeline -Navigate to https://github-private.visualstudio.com/microsoft/_build?view=pipelines to see Linux and Windows Agent build pipelines. These pipelines are configured with CI triggers for ci_dev and ci_prod (TBD). +Navigate to https://github-private.visualstudio.com/microsoft/_build?view=pipelines to see Linux and Windows Agent build pipelines. 
These pipelines are configured with CI triggers for ci_dev and ci_prod. Docker Images will be pushed to CDPX ACR repos and these needs to retagged and pushed to corresponding ACR or docker hub. Only onboarded Azure AD AppId has permission to pull the images from CDPx ACRs. @@ -236,7 +236,7 @@ Here are the instructions to onboard the feature branch to Azure Dev Ops pipelin # Azure DevOps Release Pipeline -Integrated to Azure DevOps release pipeline for the ci_dev and ci_prod (TBD).With this, for every commit to ci_dev branch, latest bits automatically deployded to DEV AKS clusters in Build subscription and similarly for for every commit to ci_prod branch, latest bits automatically deployed to PROD AKS clusters in Build subscription. +Integrated to Azure DevOps release pipeline for the ci_dev and ci_prod.With this, for every commit to ci_dev branch, latest bits automatically deployded to DEV AKS clusters in Build subscription and similarly for for every commit to ci_prod branch, latest bits automatically deployed to PROD AKS clusters in Build subscription. For dev, agent image will be in this format mcr.microsoft.com/azuremonitor/containerinsights/cidev:cidev. For prod, agent will be in this format mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod`
`. diff --git a/ReleaseNotes.md b/ReleaseNotes.md index aa57d8388..0f1d932a8 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,22 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 08/07/2020 - +##### Version microsoft/oms:ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020 (linux) +##### Version microsoft/oms:win-ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020 (windows) +##### Code change log +- Collection of KubeState metrics for deployments and HPA +- Add the Proxy support for Windows agent +- Fix for ContainerState in ContainerInventory to handle Failed state and collection of environment variables for terminated and failed containers +- Change /spec to /metrics/cadvisor endpoint to collect node capacity metrics +- Disable Health Plugin by default and can enabled via configmap +- Pin version of jq to 1.5+dfsg-2 +- Bug fix for showing node as 'not ready' when there is disk pressure +- oneagent integration (disabled by default) +- Add region check before sending alertable metrics to MDM +- Telemetry fix for agent telemetry for sov. clouds + + ### 07/15/2020 - ##### Version microsoft/oms:ciprod07152020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07152020 (linux) ##### Version microsoft/oms:win-ciprod05262020-2 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05262020-2 (windows) diff --git a/ReleaseProcess.md b/ReleaseProcess.md index 38ff1ab69..19802e22c 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -5,15 +5,21 @@ Here are the high-level instructions to get the CIPROD`
` image for the production release 1. create feature branch from ci_dev and make the following updates > Note: This required since Azure Dev Ops pipeline doesnt support --build-arg yet to automate this. - - Ensure IMAGE_TAG updated with release candiate image tag in the DockerFile under kubernetes/linux and kubernetes/windows directory - - Update omsagent.yaml if there are any changes to the yaml + - Ensure IMAGE_TAG updated with release candiate image tag in the DockerFile under kubernetes/linux and kubernetes/windows directory + - Update the version file under build directory with build version and date + - Update omsagent.yaml for the image tag and dockerProviderVersion, and any other changes + - Update the chart version and image tags in values.yaml under charts/azuremonitor-containers - Release notes 2. Make PR to ci_dev branch and once the PR approved, merge the changes to ci_dev 3. Latest bits of ci_dev automatically deployed to CIDEV cluster in build subscription so just validated E2E to make sure everthing works 4. If everything validated in DEV, make merge PR from ci_dev and ci_prod and merge once this reviewed by dev team -5. Merge ci_dev and ci_prod branch which will trigger automatic deployment of latest bits to CIPROD cluster with CIPROD`
` image (TBD) +6. Update following pipeline variables under ReleaseCandiate with version of chart and image tag + - CIHELMCHARTVERSION # For example, 2.7.4 + - CIImageTagSuffix # ciprod08072020 or ciprod08072020-1 etc. +7. Merge ci_dev and ci_prod branch which will trigger automatic deployment of latest bits to CIPROD cluster with CIPROD`
` image to test and scale cluters, AKS, AKS-Engine > Note: production image automatically pushed to CIPROD Public cloud ACR which will inturn replicated to Public cloud MCR. -6. Validate all the scenarios against CIPROD cluster in Build subscription +8. Validate all the scenarios against clusters in build subscription and scale clusters + # 2. Perf and scale testing @@ -27,7 +33,7 @@ Image automatically synched to MCR CN from Public cloud MCR. ## AKS -Make PR against [AKS-RP](https://msazure.visualstudio.com/CloudNativeCompute/_git/aks-rp?version=GBmaster) repo with chart update(s) +- Refer to internal docs for the release process and instructions. ## ARO v3 diff --git a/build/version b/build/version index b856fc312..f26973116 100644 --- a/build/version +++ b/build/version @@ -5,8 +5,8 @@ CONTAINER_BUILDVERSION_MAJOR=10 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 -CONTAINER_BUILDVERSION_BUILDNR=1 -CONTAINER_BUILDVERSION_DATE=20200526 +CONTAINER_BUILDVERSION_BUILDNR=4 +CONTAINER_BUILDVERSION_DATE=20200805 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 8a84692e7..202494152 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.3 +version: 2.7.4 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 685c767bb..610e109ef 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -7,10 +7,10 @@ omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: 
"ciprod07152020" - tagWindows: "win-ciprod05262020-2" + tag: "ciprod08072020" + tagWindows: "win-ciprod08072020" pullPolicy: IfNotPresent - dockerProviderVersion: "10.0.0-3" + dockerProviderVersion: "10.0.0-4" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index c8b61995d..bc27a5384 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod07152020 +ARG IMAGE_TAG=ciprod08072020 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 038c7e92b..29533e678 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -337,13 +337,13 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-3" + dockerProviderVersion: "10.0.0-4" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07152020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020" imagePullPolicy: IfNotPresent resources: limits: @@ -480,13 +480,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-3" + dockerProviderVersion: "10.0.0-4" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07152020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020" imagePullPolicy: IfNotPresent 
resources: limits: @@ -631,13 +631,13 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-2" + dockerProviderVersion: "10.0.0-4" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05262020-2" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 9a5e22e0d..a18404772 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -1,9 +1,36 @@ -FROM mcr.microsoft.com/azuremonitor/containerinsights/ciprod:winakslogbase-07022020 +FROM mcr.microsoft.com/windows/servercore:ltsc2019 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod05262020-2 +ARG IMAGE_TAG=win-ciprod08072020 + +# Do not split this into multiple RUN! 
+# Docker creates a layer for every RUN-Statement +RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" +# Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools +RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ +&& choco install -y msys2 --version 20190524.0.0.20191030 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ +&& choco install -y vim + +# gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update +RUN refreshenv \ +&& ridk install 3 \ +&& echo gem: --no-document >> C:\ProgramData\gemrc \ +&& gem install cool.io -v 1.5.4 --platform ruby \ +&& gem install oj -v 3.3.10 \ +&& gem install json -v 2.2.0 \ +&& gem install fluentd -v 1.10.2 \ +&& gem install win32-service -v 1.0.1 \ +&& gem install win32-ipc -v 0.7.0 \ +&& gem install win32-event -v 0.6.3 \ +&& gem install windows-pr -v 1.2.6 \ +&& gem install tomlrb -v 1.3.0 \ +&& gem install gyoku -v 1.3.1 \ +&& gem sources --clear-all + +# Remove gem cache and chocolatey +RUN powershell -Command "Remove-Item -Force C:\ruby26\lib\ruby\gems\2.6.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" SHELL ["powershell"] diff --git a/kubernetes/windows/baseimage/Dockerfile b/kubernetes/windows/baseimage/Dockerfile deleted file mode 100644 index 122daa9cc..000000000 --- a/kubernetes/windows/baseimage/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -FROM mcr.microsoft.com/windows/servercore:ltsc2019 - -# Do not split this into multiple RUN! 
-# Docker creates a layer for every RUN-Statement -RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" - -# Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools -RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ -&& choco install -y msys2 --version 20190524.0.0.20191030 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ -&& choco install -y vim -RUN refreshenv \ -&& ridk install 2 3 \ -&& echo gem: --no-document >> C:\ProgramData\gemrc \ -&& gem install cool.io -v 1.5.4 --platform ruby \ -&& gem install oj -v 3.3.10 \ -&& gem install json -v 2.2.0 \ -&& gem install fluentd -v 1.10.2 \ -&& gem install win32-service -v 1.0.1 \ -&& gem install win32-ipc -v 0.7.0 \ -&& gem install win32-event -v 0.6.3 \ -&& gem install windows-pr -v 1.2.6 \ -&& gem install tomlrb -v 1.3.0 \ -&& gem install gyoku -v 1.3.1 \ -&& gem sources --clear-all - -# Remove gem cache and chocolatey -RUN powershell -Command "Remove-Item -Force C:\ruby26\lib\ruby\gems\2.6.0\cache\*.gem; Remove-Item -Recurse -Force 'C:\ProgramData\chocolatey'" - diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 88c5641f7..63ca6de10 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1323,9 +1323,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { ContainerLogsRouteV2 = true Log("Routing container logs thru %s route...", ContainerLogsV2Route) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route... 
\n", ContainerLogsV2Route) - //} else if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { - //making dormant with below comparison for now -- - } else if strings.Compare("willnot", "match") == 0 { + } else if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { //check if adx clusteruri, clientid & secret are set var err error AdxClusterUri, err = ReadFileContents(PluginConfiguration["adx_cluster_uri_path"]) diff --git a/source/plugins/ruby/podinventory_to_mdm.rb b/source/plugins/ruby/podinventory_to_mdm.rb index dd5a15990..834515969 100644 --- a/source/plugins/ruby/podinventory_to_mdm.rb +++ b/source/plugins/ruby/podinventory_to_mdm.rb @@ -93,67 +93,71 @@ def initialize(custom_metrics_azure_regions) end def get_pod_inventory_mdm_records(batch_time) + records = [] begin - # generate all possible values of non_phase_dim_values X pod Phases and zero-fill the ones that are not already present - @no_phase_dim_values_hash.each { |key, value| - @@pod_phase_values.each { |phase| - pod_key = [key, phase].join("~~") - if !@pod_count_hash.key?(pod_key) - @pod_count_hash[pod_key] = 0 - else + if @process_incoming_stream + # generate all possible values of non_phase_dim_values X pod Phases and zero-fill the ones that are not already present + @no_phase_dim_values_hash.each { |key, value| + @@pod_phase_values.each { |phase| + pod_key = [key, phase].join("~~") + if !@pod_count_hash.key?(pod_key) + @pod_count_hash[pod_key] = 0 + else + next + end + } + } + @pod_count_hash.each { |key, value| + key_elements = key.split("~~") + if key_elements.length != 4 next end - } - } - records = [] - @pod_count_hash.each { |key, value| - key_elements = key.split("~~") - if key_elements.length != 4 - next - end - # get dimension values by key - podNodeDimValue = key_elements[0] - podNamespaceDimValue = key_elements[1] - podControllerNameDimValue = key_elements[2] - podPhaseDimValue = key_elements[3] + # get dimension values by key + podNodeDimValue = 
key_elements[0] + podNamespaceDimValue = key_elements[1] + podControllerNameDimValue = key_elements[2] + podPhaseDimValue = key_elements[3] - record = @@pod_inventory_custom_metrics_template % { - timestamp: batch_time, - metricName: @@pod_count_metric_name, - phaseDimValue: podPhaseDimValue, - namespaceDimValue: podNamespaceDimValue, - nodeDimValue: podNodeDimValue, - controllerNameDimValue: podControllerNameDimValue, - podCountMetricValue: value, + record = @@pod_inventory_custom_metrics_template % { + timestamp: batch_time, + metricName: @@pod_count_metric_name, + phaseDimValue: podPhaseDimValue, + namespaceDimValue: podNamespaceDimValue, + nodeDimValue: podNodeDimValue, + controllerNameDimValue: podControllerNameDimValue, + podCountMetricValue: value, + } + records.push(JSON.parse(record)) } - records.push(JSON.parse(record)) - } - #Add pod metric records - records = MdmMetricsGenerator.appendAllPodMetrics(records, batch_time) + #Add pod metric records + records = MdmMetricsGenerator.appendAllPodMetrics(records, batch_time) - #Send telemetry for pod metrics - timeDifference = (DateTime.now.to_time.to_i - @@metricTelemetryTimeTracker).abs - timeDifferenceInMinutes = timeDifference / 60 - if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) - MdmMetricsGenerator.flushPodMdmMetricTelemetry - @@metricTelemetryTimeTracker = DateTime.now.to_time.to_i - end + #Send telemetry for pod metrics + timeDifference = (DateTime.now.to_time.to_i - @@metricTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + MdmMetricsGenerator.flushPodMdmMetricTelemetry + @@metricTelemetryTimeTracker = DateTime.now.to_time.to_i + end - # Clearing out all hashes after telemetry is flushed - MdmMetricsGenerator.clearPodHashes + # Clearing out all hashes after telemetry is flushed + MdmMetricsGenerator.clearPodHashes + end rescue Exception => e @log.info "Error 
processing pod inventory record Exception: #{e.class} Message: #{e.message}" ApplicationInsightsUtility.sendExceptionTelemetry(e.backtrace) return [] end - @log.info "Pod Count To Phase #{@pod_count_by_phase} " - @log.info "resetting convertor state " - @pod_count_hash = {} - @no_phase_dim_values_hash = {} - @pod_count_by_phase = {} - @pod_uids = {} + if @process_incoming_stream + @log.info "Pod Count To Phase #{@pod_count_by_phase} " + @log.info "resetting convertor state " + @pod_count_hash = {} + @no_phase_dim_values_hash = {} + @pod_count_by_phase = {} + @pod_uids = {} + end return records end From 9615cbce6cfa481a09773299c9892fac47d43db6 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 5 Oct 2020 20:19:43 -0700 Subject: [PATCH 03/18] Merge from ci_dev into ci_prod (#454) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to install docker * fix: Pin to a particular version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to 
ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics (#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod branch * fix links * fix image link * some more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments * Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * 
separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. * add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. 
Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback Co-authored-by: Ganga Mahesh Siddem Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner --- ....sh => push-helm-chart-to-canary-repos.sh} | 24 +- .pipelines/push-helm-chart-to-prod-repos.sh | 53 ++ Kubecon/README.MD | 36 ++ README.md | 58 +- ReleaseNotes.md | 29 + ReleaseProcess.md | 5 +- .../PVUsagePercentage.json | 174 ++++++ build/linux/Makefile | 2 +- build/linux/installer/conf/container.conf | 6 +- build/linux/installer/conf/kube.conf | 8 +- build/linux/installer/conf/telegraf-rs.conf | 42 ++ build/linux/installer/conf/telegraf.conf | 67 ++- .../installer/datafiles/base_container.data | 8 +- .../scripts/tomlparser-mdm-metrics-config.rb | 32 +- .../tomlparser-metric-collection-config.rb | 71 +++ .../scripts/tomlparser-npm-config.rb | 136 +++++ build/version | 6 +- .../installer/conf/fluent-cri-parser.conf | 6 + .../installer/conf/fluent-docker-parser.conf | 5 + build/windows/installer/conf/fluent.conf | 32 +- 
charts/azuremonitor-containers/Chart.yaml | 4 +- .../templates/NOTES.txt | 4 +- .../templates/omsagent-arc-k8s-crd.yaml | 9 + .../templates/omsagent-crd.yaml | 24 + .../templates/omsagent-daemonset-windows.yaml | 26 +- .../templates/omsagent-daemonset.yaml | 12 +- .../templates/omsagent-deployment.yaml | 12 +- .../templates/omsagent-rbac.yaml | 8 + .../templates/omsagent-rs-configmap.yaml | 8 +- .../templates/omsagent-secret.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 40 +- img/azuremonitor-containers.svg | 66 +++ kubernetes/container-azm-ms-agentconfig.yaml | 19 + kubernetes/linux/Dockerfile | 2 +- .../linux/acrworkflows/acrdevnamespace.yaml | 4 +- .../build-and-publish-docker-image.sh | 2 +- kubernetes/linux/main.sh | 28 +- kubernetes/omsagent.yaml | 59 +- kubernetes/windows/Dockerfile | 5 +- .../build-and-publish-docker-image.ps1 | 2 +- kubernetes/windows/main.ps1 | 228 +++++-- scripts/cluster-creation/README.md | 45 ++ scripts/cluster-creation/aks-engine.sh | 163 +++++ scripts/cluster-creation/arc-k8s-cluster.sh | 190 ++++++ scripts/cluster-creation/aro-v4.sh | 146 +++++ scripts/cluster-creation/onprem-k8s.sh | 106 ++++ .../add-monitoring-metrics-publisher-role.md | 8 +- .../aks/mdmonboarding/mdm_onboarding.sh | 2 +- .../mdmonboarding/mdm_onboarding_atscale.sh | 2 +- .../kubernetes/AddMonitoringOnboardingTags.sh | 2 +- scripts/onboarding/attach-monitoring-tags.md | 8 +- .../onboarding_azuremonitor_for_containers.sh | 2 +- .../onboarding/managed/disable-monitoring.ps1 | 48 +- .../onboarding/managed/disable-monitoring.sh | 57 +- .../onboarding/managed/enable-monitoring.ps1 | 134 +++-- .../onboarding/managed/enable-monitoring.sh | 559 +++++++++--------- .../onboarding/managed/upgrade-monitoring.sh | 314 ++++++++++ scripts/onboarding/solution-onboarding.md | 4 +- .../preview/health/HealthAgentOnboarding.ps1 | 2 +- scripts/troubleshoot/README.md | 12 +- scripts/troubleshoot/TroubleshootError.ps1 | 14 +- .../TroubleshootError_nonAzureK8s.ps1 | 2 +- 
.../plugins/ruby/CAdvisorMetricsAPIClient.rb | 88 ++- source/plugins/ruby/KubernetesApiClient.rb | 20 +- source/plugins/ruby/MdmAlertTemplates.rb | 34 ++ source/plugins/ruby/MdmMetricsGenerator.rb | 114 +++- .../plugins/ruby/arc_k8s_cluster_identity.rb | 216 +++++++ source/plugins/ruby/constants.rb | 67 ++- source/plugins/ruby/filter_cadvisor2mdm.rb | 76 ++- source/plugins/ruby/in_cadvisor_perf.rb | 1 + source/plugins/ruby/in_kube_podinventory.rb | 3 + source/plugins/ruby/in_win_cadvisor_perf.rb | 1 + .../channel/sender_base.rb | 4 +- source/plugins/ruby/out_mdm.rb | 72 ++- 74 files changed, 3281 insertions(+), 599 deletions(-) rename .pipelines/{push-helm-chart-as-oci-artifact.sh => push-helm-chart-to-canary-repos.sh} (54%) create mode 100644 .pipelines/push-helm-chart-to-prod-repos.sh create mode 100644 Kubecon/README.MD create mode 100644 alerts/recommended_alerts_ARM/PVUsagePercentage.json create mode 100644 build/linux/installer/scripts/tomlparser-metric-collection-config.rb create mode 100644 build/linux/installer/scripts/tomlparser-npm-config.rb create mode 100644 build/windows/installer/conf/fluent-cri-parser.conf create mode 100644 build/windows/installer/conf/fluent-docker-parser.conf create mode 100644 charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml create mode 100644 img/azuremonitor-containers.svg create mode 100644 scripts/cluster-creation/README.md create mode 100644 scripts/cluster-creation/aks-engine.sh create mode 100644 scripts/cluster-creation/arc-k8s-cluster.sh create mode 100644 scripts/cluster-creation/aro-v4.sh create mode 100755 scripts/cluster-creation/onprem-k8s.sh create mode 100644 scripts/onboarding/managed/upgrade-monitoring.sh create mode 100644 source/plugins/ruby/arc_k8s_cluster_identity.rb diff --git a/.pipelines/push-helm-chart-as-oci-artifact.sh b/.pipelines/push-helm-chart-to-canary-repos.sh similarity index 54% rename from .pipelines/push-helm-chart-as-oci-artifact.sh rename to 
.pipelines/push-helm-chart-to-canary-repos.sh index 50e16e3d0..db8bff56e 100644 --- a/.pipelines/push-helm-chart-as-oci-artifact.sh +++ b/.pipelines/push-helm-chart-to-canary-repos.sh @@ -1,8 +1,9 @@ #!/bin/bash -# push the helm chart as an OCI artifact to specified ACR # working directory of this script should be charts/azuremonitor-containers -export REPO_PATH="batch1/test/azure-monitor-containers" +# note: this repo registered in arc k8s extension for canary region +export REPO_PATH="public/azuremonitor/containerinsights/canary/preview/azuremonitor-containers" + export HELM_EXPERIMENTAL_OCI=1 for ARGUMENT in "$@" @@ -11,13 +12,13 @@ do VALUE=$(echo $ARGUMENT | cut -f2 -d=) case "$KEY" in - CIARCACR) CIARCACR=$VALUE ;; + CIACR) CIACR=$VALUE ;; CICHARTVERSION) CHARTVERSION=$VALUE ;; *) esac done -echo "CI ARC K8S ACR: ${CIARCACR}" +echo "CI ARC K8S ACR: ${CIACR}" echo "CI HELM CHART VERSION: ${CHARTVERSION}" echo "start: read appid and appsecret" @@ -25,18 +26,19 @@ ACR_APP_ID=$(cat ~/acrappid) ACR_APP_SECRET=$(cat ~/acrappsecret) echo "end: read appid and appsecret" -ACR=${CIARCACR} +ACR=${CIACR} + +echo "login to acr:${ACR} using helm" +helm registry login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET -echo "login to acr:${ACR} using oras" -oras login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET echo "login to acr:${ACR} completed: ${ACR}" echo "start: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" -echo "generate helm package" -helm package . +echo "save the chart locally with acr full path" +helm chart save . 
${ACR}/${REPO_PATH}:${CHARTVERSION} -echo "pushing the helm chart as an OCI artifact" -oras push ${ACR}/${REPO_PATH}:${CHARTVERSION} --manifest-config /dev/null:application/vnd.unknown.config.v1+json ./azuremonitor-containers-${CHARTVERSION}.tgz:application/tar+gzip +echo "pushing the helm chart to ACR: ${ACR}" +helm chart push ${ACR}/${REPO_PATH}:${CHARTVERSION} echo "end: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" diff --git a/.pipelines/push-helm-chart-to-prod-repos.sh b/.pipelines/push-helm-chart-to-prod-repos.sh new file mode 100644 index 000000000..71aa989de --- /dev/null +++ b/.pipelines/push-helm-chart-to-prod-repos.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# working directory of this script should be charts/azuremonitor-containers + +# this repo used without extension public preview release +export PROD_REPO_PATH="public/azuremonitor/containerinsights/preview/azuremonitor-containers" + +# note: this repo registered in arc k8s extension for prod group1 regions. +export EXTENSION_PROD_REPO_PATH="public/azuremonitor/containerinsights/prod1/preview/azuremonitor-containers" + +export HELM_EXPERIMENTAL_OCI=1 + +for ARGUMENT in "$@" +do + KEY=$(echo $ARGUMENT | cut -f1 -d=) + VALUE=$(echo $ARGUMENT | cut -f2 -d=) + + case "$KEY" in + CIACR) CIACR=$VALUE ;; + CICHARTVERSION) CHARTVERSION=$VALUE ;; + *) + esac +done + +echo "CI ARC K8S ACR: ${CIACR}" +echo "CI HELM CHART VERSION: ${CHARTVERSION}" + +echo "start: read appid and appsecret" +ACR_APP_ID=$(cat ~/acrappid) +ACR_APP_SECRET=$(cat ~/acrappsecret) +echo "end: read appid and appsecret" + +ACR=${CIACR} + +echo "login to acr:${ACR} using helm" +helm registry login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET + +echo "login to acr:${ACR} completed: ${ACR}" + +echo "start: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" + +echo "save the chart locally with acr full path: ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION}" +helm chart save . 
${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION} + +echo "save the chart locally with acr full path: ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION}" +helm chart save . ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION} + +echo "pushing the helm chart to ACR: ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION}" +helm chart push ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION} + +echo "pushing the helm chart to ACR: ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION}" +helm chart push ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION} + +echo "end: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" diff --git a/Kubecon/README.MD b/Kubecon/README.MD new file mode 100644 index 000000000..873cfaf9a --- /dev/null +++ b/Kubecon/README.MD @@ -0,0 +1,36 @@ +# Kubecon Azure Monitor for containers lab + +## Overview + +### This Azure Monitor for containers lab will give you hands on experience to monitor AKS workloads. In this lab you will be working Azure Monitor, Log Analytics and Azure Monitor for Container Insights. + +## Instructions for lab + +1. Set-up environment [Setup Guide](https://github.com/rkuehfus/pre-ready-2019-H1/blob/master/Student/Guides/Deployment%20Setup%20Guide.docx?raw=true) + +2. Tasks for the lab + * From your Visual Studio Server, deploy the eShoponWeb application to AKS using Dev Spaces + * From Azure Monitor, locate the container running the eShoponWeb application + * Generate an exception in the eShoponWeb application(Hint: Try to change your password) + * Optimize the Azure Monitor for contains ingestion cost by fine tuning log-collection parameters like std-out/std-error, namespace. + +## Outcome + +### Understand Azure Monitor capabilities, facilitate an Azure Monitor customer conversation, and demo key features of Azure Monitor. + +## Target Audience + +This content has been targeted to devops/SRE intended to build their knowledge on Azure Monitor also for people that have a passion around Monitoring are more than welcome to attend. + +## Prerequisites + 1. 
Please review the following content before the event + a. [Azure Monitor for containers Overview](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-overview) + b. [Optimize Azure Monitor for containers cost ](https://medium.com/microsoftazure/azure-monitor-for-containers-optimizing-data-collection-settings-for-cost-ce6f848aca32) + +2. Attendees have access to an Azure Subscription where they can each deploy the provided ARM template that will build a very detailed infrastructure to monitor. This includes the Vnet, subnets, NSG(s), LB(s), NAT rules, scales set and a fully functional .NET Core Application (eShopOnWeb) to monitor. +3. Attendees should have a level 200-300 understanding of the Azure platform. Understand concepts like PowerShell, Azure Cli, ARM, resource groups, RBAC, network, storage, compute, scale sets, virtual machines and security. Previous experience working with ARM templates is recommended. +4. Access to a machine with Visual Studio Code and the Azure PowerShell Modules loaded or Azure CLI. VS Code ARM and PowerShell extensions should be configured. 
+ +![alt text](https://raw.githubusercontent.com/rkuehfus/pre-ready-2019-H1/master/monitoringhackdiagram.png) + + diff --git a/README.md b/README.md index 659fe0161..3eec1f344 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,6 @@ The general directory structure is: │ ├── windows/ - scripts to build the Docker image for Windows Agent │ │ ├── dockerbuild - script to build the code and docker imag, and publish docker image │ │ ├── acrworkflows/ - acr work flows for the Windows Agent container image -│ │ ├── baseimage/ - windowsservercore base image for the windows agent container │ │ ├── DockerFile - DockerFile for Windows Agent Container Image │ │ ├── main.ps1 - Windows Agent container entry point │ │ ├── setup.ps1 - setup file for Windows Agent Container Image @@ -140,7 +139,7 @@ bash ~/Docker-Provider/scripts/build/linux/install-build-pre-requisites.sh ### Build Docker Provider Shell Bundle and Docker Image and Publish Docker Image -> Note: If you are using WSL2, ensure Docker for windows running Linux containers mode to build Linux agent image successfully +> Note: If you are using WSL2, ensure `Docker for windows` running with Linux containers mode on your windows machine to build Linux agent image successfully ``` cd ~/Docker-Provider/kubernetes/linux/dockerbuild @@ -167,9 +166,23 @@ docker push /: ``` ## Windows Agent +To build the windows agent, you will have to build .NET and Go code, and docker image for windows agent. +Docker image for windows agent can only build on Windows machine with `Docker for windows` with Windows containers mode but the .NET code and Go code can be built either on Windows or Linux or WSL2. + ### Install Pre-requisites -If you are planning to build the .net and go code for windows agent on Linux machine and you have already have Docker for Windows on Windows machine, then you may skip this. 
+Install pre-requisites based on OS platform you will be using to build the windows agent code + +#### Option 1 - Using Windows Machine to Build the Windows agent + +``` +powershell # launch powershell with elevated admin on your windows machine +Set-ExecutionPolicy -ExecutionPolicy bypass # set the execution policy +cd %userprofile%\Docker-Provider\scripts\build\windows # based on your repo path +.\install-build-pre-requisites.ps1 # +``` + +#### Option 2 - Using WSL2 to Build the Windows agent ``` powershell # launch powershell with elevated admin on your windows machine @@ -178,20 +191,36 @@ net use z: \\wsl$\Ubuntu-16.04 # map the network drive of the ubuntu app to wind cd z:\home\sshadmin\Docker-Provider\scripts\build\windows # based on your repo path .\install-build-pre-requisites.ps1 # ``` -#### Build Certificate Generator Source code and Out OMS Go plugin code -> Note: .net and go code for windows agent can built on Ubuntu + +### Build Windows Agent code and Docker Image + +> Note: format of the windows agent imagetag will be `win-ci`. possible values for release are test, dev, preview, dogfood, prod etc. 
+ +#### Option 1 - Using Windows Machine to Build the Windows agent + +Execute below instructions on elevated command prompt to build windows agent code and docker image, publishing the image to acr or docker hub ``` -cd ~/Docker-Provider/build/windows # based on your repo path on ubuntu or WSL2 +cd %userprofile%\Docker-Provider\kubernetes\windows\dockerbuild # based on your repo path +docker login # if you want to publish the image to acr then login to acr via `docker login ` +powershell -ExecutionPolicy bypass # switch to powershell if you are not on powershell already +.\build-and-publish-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr +``` + +#### Option 2 - Using WSL2 to Build the Windows agent + +##### On WSL2, Build Certificate Generator Source code and Out OMS Go plugin code + +``` +cd ~/Docker-Provider/build/windows # based on your repo path on WSL2 Ubuntu app pwsh #switch to powershell .\Makefile.ps1 # trigger build and publish of .net and go code ``` -> Note: format of the imagetag will be `win-ci`. possible values for release are test, dev, preview, dogfood, prod etc. -#### Build and Push Docker Image +#### On Windows machine, build and Push Docker Image -> Note: windows container can only built on windows hence you will have to execute below commands on windows via accessing network share or copying published bits omsagentwindows under kubernetes directory on to windows machine +> Note: Docker image for windows container can only built on windows hence you will have to execute below commands on windows via accessing network share or copying published bits omsagentwindows under kubernetes directory on to windows machine ``` net use z: \\wsl$\Ubuntu-16.04 # map the network drive of the ubuntu app to windows @@ -200,17 +229,6 @@ docker build -t /: --build-arg IMAGE_TAG= . 
docker push /: ``` -### Build Cert generator, Out OMS Plugun and Docker Image and Publish Docker Image - -If you have code cloned on to windows, you can built everything for windows agent on windows machine via below instructions - -``` -cd %userprofile%\Docker-Provider\kubernetes\windows\dockerbuild # based on your repo path -docker login # if you want to publish the image to acr then login to acr via `docker login ` -powershell -ExecutionPolicy bypass # switch to powershell if you are not on powershell already -.\build-and-publish-docker-image.ps1 -image /: # trigger build code and image and publish docker hub or acr -``` - # Azure DevOps Build Pipeline Navigate to https://github-private.visualstudio.com/microsoft/_build?view=pipelines to see Linux and Windows Agent build pipelines. These pipelines are configured with CI triggers for ci_dev and ci_prod. diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 0f1d932a8..e1892d083 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,35 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 10/05/2020 - +##### Version microsoft/oms:ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020 (linux) +##### Version microsoft/oms:win-ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) +##### Code change log +- Health CRD to version v1 (from v1beta1) for k8s versions >= 1.19.0 +- Collection of PV usage metrics for PVs mounted by pods (kube-system pods excluded by default)(doc-link-needed) +- Zero fill few custom metrics under a timer, also add zero filling for new PV usage metrics +- Collection of additional Kubelet metrics ('kubelet_running_pod_count','volume_manager_total_volumes','kubelet_node_config_error','process_resident_memory_bytes','process_cpu_seconds_total','kubelet_runtime_operations_total','kubelet_runtime_operations_errors_total'). This also includes updates to 'kubelet' workbook to include these new metrics +- Collection of Azure NPM (Network Policy Manager) metrics (basic & advanced. By default, NPM metrics collection is turned OFF)(doc-link-needed) +- Support log collection when docker root is changed with knode. 
Tracked by [this](https://github.com/Azure/AKS/issues/1373) issue +- Support for Pods in 'Terminating' state for nodelost scenarios +- Fix for reduction in telemetry for custom metrics ingestion failures +- Fix CPU capacity/limits metrics being 0 for Virtual nodes (VK) +- Add new custom metric regions (eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth) +- Enable strict SSL validation for AppInsights Ruby SDK +- Turn off custom metrics upload for unsupported cluster types +- Install CA certs from wire server for windows (in certain clouds) + +### 09/16/2020 - +> Note: This agent release targetted ONLY for non-AKS clusters via Azure Monitor for containers HELM chart update +##### Version microsoft/oms:ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020 (linux) +##### Version microsoft/oms:win-ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod09162020 (windows) +##### Code change log +- Collection of Azure Network Policy Manager Basic and Advanced metrics +- Add support in Windows Agent for Container log collection of CRI runtimes such as ContainerD +- Alertable metrics support Arc K8s cluster to parity with AKS +- Support for multiple container log mount paths when docker is updated through knode +- Bug fix related to MDM telemetry + ### 08/07/2020 - ##### Version microsoft/oms:ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020 (linux) ##### Version microsoft/oms:win-ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020 (windows) diff --git a/ReleaseProcess.md b/ReleaseProcess.md index 19802e22c..2a3e6001a 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -45,7 +45,10 @@ Make PR against [AKS-Engine](https://github.com/Azure/aks-engine). 
Refer PR http ## ARO v4, On-prem K8s, Azure Arc K8s and OpenShift v4 clusters -Make PR against [HELM-charts](https://github.com/helm/charts) with Azure Monitor for containers chart update. +Make sure azuremonitor-containers chart yamls updates with all changes going with the release and also make sure to bump the chart version, imagetag and docker provider version etc. Similar to agent container image, build pipeline automatically push the chart to container insights prod acr for canary and prod repos accordingly. +Both the agent and helm chart will be replicated to `mcr.microsoft.com`. + +The way, customers will be onboard the monitoring to these clusters using onboarding scripts under `onboarding\managed` directory so please bump chart version for prod release. Once we move to Arc K8s Monitoring extension Public preview, these will be taken care so at that point of time no manual changes like this required. # 4. Monitor agent roll-out status diff --git a/alerts/recommended_alerts_ARM/PVUsagePercentage.json b/alerts/recommended_alerts_ARM/PVUsagePercentage.json new file mode 100644 index 000000000..e6cdbee15 --- /dev/null +++ b/alerts/recommended_alerts_ARM/PVUsagePercentage.json @@ -0,0 +1,174 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "alertName": { + "type": "string", + "minLength": 1, + "metadata": { + "description": "Name of the alert" + } + }, + "alertDescription": { + "type": "string", + "defaultValue": "This is a metric alert", + "metadata": { + "description": "Description of alert" + } + }, + "alertSeverity": { + "type": "int", + "defaultValue": 3, + "allowedValues": [ + 0, + 1, + 2, + 3, + 4 + ], + "metadata": { + "description": "Severity of alert {0,1,2,3,4}" + } + }, + "isEnabled": { + "type": "bool", + "defaultValue": true, + "metadata": { + "description": "Specifies whether the alert is enabled" + } + }, + "clusterResourceId": { + "type": 
"string", + "minLength": 1, + "metadata": { + "description": "Full Resource ID of the kubernetes cluster emitting the metric that will be used for the comparison. For example /subscriptions/00000000-0000-0000-0000-0000-00000000/resourceGroups/ResourceGroupName/providers/Microsoft.ContainerService/managedClusters/cluster-xyz" + } + }, + "operator": { + "type": "string", + "defaultValue": "GreaterThan", + "allowedValues": [ + "Equals", + "NotEquals", + "GreaterThan", + "GreaterThanOrEqual", + "LessThan", + "LessThanOrEqual" + ], + "metadata": { + "description": "Operator comparing the current value with the threshold value." + } + }, + "threshold": { + "type": "int", + "defaultValue": 80, + "metadata": { + "description": "The threshold value at which the alert is activated." + }, + "minValue": 1, + "maxValue": 100 + }, + "timeAggregation": { + "type": "string", + "defaultValue": "Average", + "allowedValues": [ + "Average", + "Minimum", + "Maximum", + "Count" + ], + "metadata": { + "description": "How the data that is collected should be combined over time." + } + }, + "windowSize": { + "type": "string", + "defaultValue": "PT5M", + "allowedValues": [ + "PT1M", + "PT5M", + "PT15M", + "PT30M", + "PT1H", + "PT6H", + "PT12H", + "PT24H" + ], + "metadata": { + "description": "Period of time used to monitor alert activity based on the threshold. Must be between one minute and one day. ISO 8601 duration format." 
+ } + }, + "evaluationFrequency": { + "type": "string", + "defaultValue": "PT1M", + "allowedValues": [ + "PT1M", + "PT5M", + "PT15M", + "PT30M", + "PT1H" + ], + "metadata": { + "description": "how often the metric alert is evaluated represented in ISO 8601 duration format" + } + }, + "actionGroupId": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "The ID of the action group that is triggered when the alert is activated or deactivated" + } + } + }, + "variables": {}, + "resources": [ + { + "name": "[parameters('alertName')]", + "type": "Microsoft.Insights/metricAlerts", + "location": "global", + "apiVersion": "2018-03-01", + "tags": {}, + "properties": { + "description": "[parameters('alertDescription')]", + "severity": "[parameters('alertSeverity')]", + "enabled": "[parameters('isEnabled')]", + "scopes": [ + "[parameters('clusterResourceId')]" + ], + "evaluationFrequency": "[parameters('evaluationFrequency')]", + "windowSize": "[parameters('windowSize')]", + "criteria": { + "odata.type": "Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria", + "allOf": [ + { + "name": "1st criterion", + "metricName": "pvUsageExceededPercentage", + "metricNamespace": "Insights.Container/persistentvolumes", + "dimensions": [ + { + "name": "kubernetesNamespace", + "operator": "Include", + "values": [ + "*" + ] + }, + { + "name": "podName", + "operator": "Include", + "values": [ + "*" + ] + } + ], + "operator": "[parameters('operator')]", + "threshold": "[parameters('threshold')]", + "timeAggregation": "[parameters('timeAggregation')]", + "skipMetricValidation": true + } + ] + }, + "actions": "[if(empty(parameters('actionGroupId')), json('null'), json(concat('[{\"actionGroupId\": \"',parameters('actionGroupId'),'\"}]')))]" + } + } + ] +} diff --git a/build/linux/Makefile b/build/linux/Makefile index 0a20ed205..3f35e1204 100644 --- a/build/linux/Makefile +++ b/build/linux/Makefile @@ -118,7 +118,7 @@ distclean : clean PROVIDER_STATUS: @echo 
"========================= Performing Building provider" @echo "clean up everything under: $(INTERMEDIATE_BASE_DIR) to avoid picking up old binaries" - $(RMDIR) $(INTERMEDIATE_BASE_DIR) + sudo $(RMDIR) $(INTERMEDIATE_BASE_DIR) KIT_STATUS: @echo "========================= Performing Building provider tests" diff --git a/build/linux/installer/conf/container.conf b/build/linux/installer/conf/container.conf index f02ec0131..f7e6e1da9 100644 --- a/build/linux/installer/conf/container.conf +++ b/build/linux/installer/conf/container.conf @@ -45,14 +45,14 @@ #custom_metrics_mdm filter plugin type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast - metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,memoryRssBytes + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth + metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,memoryRssBytes,pvUsedBytes log_level info type filter_telegraf2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions 
eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level debug diff --git a/build/linux/installer/conf/kube.conf b/build/linux/installer/conf/kube.conf index 9ada8425f..dbb4db0da 100644 --- a/build/linux/installer/conf/kube.conf +++ b/build/linux/installer/conf/kube.conf @@ -13,7 +13,7 @@ tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth #Kubernetes events @@ -66,15 +66,15 @@ type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info #custom_metrics_mdm filter plugin for perf data from 
windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast - metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth + metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes log_level info diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index f1e9cc282..d81196330 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -611,3 +611,45 @@ $AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER # Computer = "placeholder_hostname" # ControllerType = "$CONTROLLER_TYPE" +##npm +[[inputs.prometheus]] + #name_prefix="container.azm.ms/" + ## An array of urls to scrape metrics from. + urls = $AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER + fielddrop = $AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER + + metric_version = 2 + url_tag = "scrapeUrl" + + ## An array of Kubernetes services to scrape metrics from. + # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. 
+ ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true + + ## Use bearer token for authorization. ('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## OR + # bearer_token_string = "abc_123" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + #tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + insecure_skip_verify = true + #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] + #[inputs.prometheus.tagpass] + # operation_type = ["create_container", "remove_container", "pull_image"] + diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index b554dd4b3..202ac9741 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -627,12 +627,12 @@ # ACSResourceName = "$TELEMETRY_ACS_RESOURCE_NAME" # Region = "$TELEMETRY_AKS_REGION" +#kubelet-1 [[inputs.prometheus]] name_prefix="container.azm.ms/" ## An array of urls to scrape metrics from. 
urls = ["$CADVISOR_METRICS_URL"] - ## Include "$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC" when we add for support for 1.18 - fieldpass = ["$KUBELET_RUNTIME_OPERATIONS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC"] + fieldpass = ["$KUBELET_RUNTIME_OPERATIONS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_METRIC", "$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC", "$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC"] metric_version = 2 url_tag = "scrapeUrl" @@ -669,6 +669,28 @@ [inputs.prometheus.tagpass] operation_type = ["create_container", "remove_container", "pull_image"] +#kubelet-2 +[[inputs.prometheus]] + name_prefix="container.azm.ms/" + ## An array of urls to scrape metrics from. + urls = ["$CADVISOR_METRICS_URL"] + + fieldpass = ["kubelet_running_pod_count","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] + + metric_version = 2 + url_tag = "scrapeUrl" + + + ## Use bearer token for authorization. ('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + insecure_skip_verify = true + + ## prometheus custom metrics [[inputs.prometheus]] @@ -703,6 +725,47 @@ insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] +##npm +[[inputs.prometheus]] + #name_prefix="container.azm.ms/" + ## An array of urls to scrape metrics from. + urls = $AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE + + metric_version = 2 + url_tag = "scrapeUrl" + + ## An array of Kubernetes services to scrape metrics from. 
+ # kubernetes_services = ["http://my-service-dns.my-namespace:9100/metrics"] + + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + # monitor_kubernetes_pods = true + + ## Use bearer token for authorization. ('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## OR + # bearer_token_string = "abc_123" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + #tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + insecure_skip_verify = true + #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] + #[inputs.prometheus.tagpass] + # operation_type = ["create_container", "remove_container", "pull_image"] + # [[inputs.exec]] # ## Commands array # interval = "15m" diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index f07e71b2d..ca2538b79 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -50,7 +50,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/microsoft/omsagent/plugin/kubernetes_container_inventory.rb; source/plugins/ruby/kubernetes_container_inventory.rb; 644; root; root /opt/microsoft/omsagent/plugin/proxy_utils.rb; 
source/plugins/ruby/proxy_utils.rb; 644; root; root - +/opt/microsoft/omsagent/plugin/arc_k8s_cluster_identity.rb; source/plugins/ruby/arc_k8s_cluster_identity.rb; 644; root; root /opt/microsoft/omsagent/plugin/out_mdm.rb; source/plugins/ruby/out_mdm.rb; 644; root; root /opt/microsoft/omsagent/plugin/filter_cadvisor2mdm.rb; source/plugins/ruby/filter_cadvisor2mdm.rb; 644; root; root /opt/microsoft/omsagent/plugin/filter_telegraf2mdm.rb; source/plugins/ruby/filter_telegraf2mdm.rb; 644; root; root @@ -120,11 +120,13 @@ MAINTAINER: 'Microsoft Corporation' /opt/livenessprobe.sh; build/linux/installer/scripts/livenessprobe.sh; 755; root; root /opt/tomlparser-prom-customconfig.rb; build/linux/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root /opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root +/opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root /opt/tomlparser-health-config.rb; build/linux/installer/scripts/tomlparser-health-config.rb; 755; root; root /opt/tomlparser.rb; build/common/installer/scripts/tomlparser.rb; 755; root; root /opt/td-agent-bit-conf-customizer.rb; build/common/installer/scripts/td-agent-bit-conf-customizer.rb; 755; root; root /opt/ConfigParseErrorLogger.rb; build/common/installer/scripts/ConfigParseErrorLogger.rb; 755; root; root +/opt/tomlparser-npm-config.rb; build/linux/installer/scripts/tomlparser-npm-config.rb; 755; root; root /opt/microsoft/omsagent/plugin/filter_cadvisor_health_container.rb; source/plugins/ruby/filter_cadvisor_health_container.rb; 644; root; root @@ -275,6 +277,10 @@ touch /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log chmod 666 /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log +touch 
/var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log +chmod 666 /var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log +chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log + mv /etc/opt/microsoft/docker-cimprov/container.conf /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf chown omsagent:omsagent /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf diff --git a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb b/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb index 1c01dd8c6..345c51633 100644 --- a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb +++ b/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb @@ -12,6 +12,7 @@ @percentageCpuUsageThreshold = Constants::DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD @percentageMemoryRssThreshold = Constants::DEFAULT_MDM_MEMORY_RSS_THRESHOLD @percentageMemoryWorkingSetThreshold = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD +@percentagePVUsageThreshold = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD # Use parser to parse the configmap toml file to a ruby structure def parseConfigMap @@ -35,7 +36,7 @@ def parseConfigMap # Use the ruby structure created after config parsing to set the right values to be used for MDM metric configuration settings def populateSettingValuesFromConfigMap(parsedConfig) if !parsedConfig.nil? && !parsedConfig[:alertable_metrics_configuration_settings].nil? - # Get mdm metrics config settings for resource utilization + # Get mdm metrics config settings for container resource utilization begin resourceUtilization = parsedConfig[:alertable_metrics_configuration_settings][:container_resource_utilization_thresholds] if !resourceUtilization.nil? 
@@ -66,7 +67,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) puts "config::Non floating point value or value not convertible to float specified for Memory Working Set threshold, using default " @percentageMemoryWorkingSetThreshold = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD end - puts "config::Using config map settings for MDM metric configuration settings for resource utilization" + puts "config::Using config map settings for MDM metric configuration settings for container resource utilization" end rescue => errorStr ConfigParseErrorLogger.logError("Exception while reading config map settings for MDM metric configuration settings for resource utilization - #{errorStr}, using defaults, please check config map for errors") @@ -74,6 +75,32 @@ def populateSettingValuesFromConfigMap(parsedConfig) @percentageMemoryRssThreshold = Constants::DEFAULT_MDM_MEMORY_RSS_THRESHOLD @percentageMemoryWorkingSetThreshold = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD end + + # Get mdm metrics config settings for PV utilization + begin + isUsingPVThresholdConfig = false + pvUtilizationThresholds = parsedConfig[:alertable_metrics_configuration_settings][:pv_utilization_thresholds] + if !pvUtilizationThresholds.nil? + pvUsageThreshold = pvUtilizationThresholds[:pv_usage_threshold_percentage] + if !pvUsageThreshold.nil? + pvUsageThresholdFloat = pvUsageThreshold.to_f + if pvUsageThresholdFloat.kind_of? 
Float + @percentagePVUsageThreshold = pvUsageThresholdFloat + isUsingPVThresholdConfig = true + end + end + end + + if isUsingPVThresholdConfig + puts "config::Using config map settings for MDM metric configuration settings for PV utilization" + else + puts "config::Non floating point value or value not convertible to float specified for PV threshold, using default " + @percentagePVUsageThreshold = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while reading config map settings for MDM metric configuration settings for PV utilization - #{errorStr}, using defaults, please check config map for errors") + @percentagePVUsageThreshold = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD + end end end @@ -97,6 +124,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export AZMON_ALERT_CONTAINER_CPU_THRESHOLD=#{@percentageCpuUsageThreshold}\n") file.write("export AZMON_ALERT_CONTAINER_MEMORY_RSS_THRESHOLD=#{@percentageMemoryRssThreshold}\n") file.write("export AZMON_ALERT_CONTAINER_MEMORY_WORKING_SET_THRESHOLD=\"#{@percentageMemoryWorkingSetThreshold}\"\n") + file.write("export AZMON_ALERT_PV_USAGE_THRESHOLD=#{@percentagePVUsageThreshold}\n") # Close file after writing all MDM setting environment variables file.close puts "****************End MDM Metrics Config Processing********************" diff --git a/build/linux/installer/scripts/tomlparser-metric-collection-config.rb b/build/linux/installer/scripts/tomlparser-metric-collection-config.rb new file mode 100644 index 000000000..40d87b7f1 --- /dev/null +++ b/build/linux/installer/scripts/tomlparser-metric-collection-config.rb @@ -0,0 +1,71 @@ +#!/usr/local/bin/ruby +# frozen_string_literal: true + +require_relative "tomlrb" +require_relative "ConfigParseErrorLogger" +require_relative "microsoft/omsagent/plugin/constants" + +@configMapMountPath = "/etc/config/settings/metric_collection_settings" +@configVersion = "" 
+@configSchemaVersion = "" + +# Setting default values which will be used in case they are not set in the configmap or if configmap doesnt exist +@collectPVKubeSystemMetrics = false + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@configMapMountPath)) + puts "config::configmap container-azm-ms-agentconfig for metric collection settings mounted, parsing values" + parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted config map" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for metric collection settings not mounted, using defaults" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for metric collection settings: #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +# Use the ruby structure created after config parsing to set the right values to be used for metric collection settings +def populateSettingValuesFromConfigMap(parsedConfig) + # Get metric collection settings for including or excluding kube-system namespace in PV metrics + begin + if !parsedConfig.nil? && !parsedConfig[:metric_collection_settings][:collect_kube_system_pv_metrics].nil? && !parsedConfig[:metric_collection_settings][:collect_kube_system_pv_metrics][:enabled].nil? 
+ @collectPVKubeSystemMetrics = parsedConfig[:metric_collection_settings][:collect_kube_system_pv_metrics][:enabled] + puts "config::Using config map setting for PV kube-system collection" + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while reading config map settings for PV kube-system collection - #{errorStr}, using defaults, please check config map for errors") + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Metric Collection Settings Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version, so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@configMapMountPath)) + ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") + end +end + +# Write the settings to file, so that they can be set as environment variables +file = File.open("config_metric_collection_env_var", "w") + +if !file.nil? 
+ file.write("export AZMON_PV_COLLECT_KUBE_SYSTEM_METRICS=#{@collectPVKubeSystemMetrics}\n") + # Close file after writing all metric collection setting environment variables + file.close + puts "****************End Metric Collection Settings Processing********************" +else + puts "Exception while opening file for writing MDM metric config environment variables" + puts "****************End Metric Collection Settings Processing********************" +end diff --git a/build/linux/installer/scripts/tomlparser-npm-config.rb b/build/linux/installer/scripts/tomlparser-npm-config.rb new file mode 100644 index 000000000..777fef209 --- /dev/null +++ b/build/linux/installer/scripts/tomlparser-npm-config.rb @@ -0,0 +1,136 @@ +#!/usr/local/bin/ruby + +#this should be require relative in Linux and require in windows, since it is a gem install on windows +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + require "tomlrb" +else + require_relative "tomlrb" +end + +require_relative "ConfigParseErrorLogger" + +@configMapMountPath = "/etc/config/settings/integrations" +@configSchemaVersion = "" +@collect_basic_npm_metrics = false +@collect_advanced_npm_metrics = false +@npm_default_setting = "[]" +@npm_node_urls = "[\"http://$NODE_IP:10091/node-metrics\"]" +@npm_cluster_urls="[\"http://npm-metrics-cluster-service.kube-system:9000/cluster-metrics\"]" +@npm_basic_drop_metrics_cluster = "[\"npm_ipset_counts\"]" +@tgfConfigFileDS = "/etc/opt/microsoft/docker-cimprov/telegraf.conf" +@tgfConfigFileRS = "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" +@replicaset = "replicaset" + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@configMapMountPath)) + puts "config::configmap container-azm-ms-agentconfig for npm metrics found, parsing values" + parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) + 
puts "config::Successfully parsed mounted config map for npm metrics" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for npm metrics not mounted, using defaults" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for npm metrics: #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + begin + if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].nil? + advanced_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_advanced_metrics].to_s + puts "config::npm::got:integrations.azure_network_policy_manager.collect_advanced_metrics='#{advanced_npm_metrics}'" + if !advanced_npm_metrics.nil? && advanced_npm_metrics.strip.casecmp("true") == 0 + @collect_advanced_npm_metrics = true + else + @collect_advanced_npm_metrics = false + end + puts "config::npm::set:integrations.azure_network_policy_manager.collect_advanced_metrics=#{@collect_advanced_npm_metrics}" + end + rescue => errorStr + puts "config::npm::error:Exception while reading config settings for npm advanced setting - #{errorStr}, using defaults" + @collect_advanced_npm_metrics = false + end + begin + if !parsedConfig.nil? && !parsedConfig[:integrations].nil? && !parsedConfig[:integrations][:azure_network_policy_manager].nil? && !parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].nil? 
+ basic_npm_metrics = parsedConfig[:integrations][:azure_network_policy_manager][:collect_basic_metrics].to_s + puts "config::npm::got:integrations.azure_network_policy_manager.collect_basic_metrics='#{basic_npm_metrics}'" + if !basic_npm_metrics.nil? && basic_npm_metrics.strip.casecmp("true") == 0 + @collect_basic_npm_metrics = true + else + @collect_basic_npm_metrics = false + end + puts "config::npm::set:integrations.azure_network_policy_manager.collect_basic_metrics=#{@collect_basic_npm_metrics}" + end + rescue => errorStr + puts "config::npm::error:Exception while reading config settings for npm basic setting - #{errorStr}, using defaults" + @collect_basic_npm_metrics = false + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start NPM Config Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? 
+ populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@configMapMountPath)) + ConfigParseErrorLogger.logError("config::npm::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") + end + @collect_basic_npm_metrics = false + @collect_advanced_npm_metrics = false +end + + + +controller = ENV["CONTROLLER_TYPE"] +tgfConfigFile = @tgfConfigFileDS + +if controller.casecmp(@replicaset) == 0 + tgfConfigFile = @tgfConfigFileRS +end + +#replace place holders in configuration file +tgfConfig = File.read(tgfConfigFile) #read returns only after closing the file + +if @collect_advanced_npm_metrics == true + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE", @npm_node_urls) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER", @npm_cluster_urls) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER", @npm_default_setting) +elsif @collect_basic_npm_metrics == true + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE", @npm_node_urls) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER", @npm_cluster_urls) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER", @npm_basic_drop_metrics_cluster) +else + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_NODE", @npm_default_setting) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_URL_LIST_CLUSTER", @npm_default_setting) + tgfConfig = tgfConfig.gsub("$AZMON_INTEGRATION_NPM_METRICS_DROP_LIST_CLUSTER", @npm_default_setting) +end + +File.open(tgfConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope +puts "config::npm::Successfully substituted the NPM placeholders into #{tgfConfigFile} file for #{controller}" + +# Write the telemetry to file, so that they can be set as environment variables +telemetryFile = 
File.open("integration_npm_config_env_var", "w") + +if !telemetryFile.nil? + if @collect_advanced_npm_metrics == true + telemetryFile.write("export TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED=1\n") + elsif @collect_basic_npm_metrics == true + telemetryFile.write("export TELEMETRY_NPM_INTEGRATION_METRICS_BASIC=1\n") + end + # Close file after writing all environment variables + telemetryFile.close +else + puts "config::npm::Exception while opening file for writing NPM telemetry environment variables" + puts "****************End NPM Config Processing********************" +end diff --git a/build/version b/build/version index f26973116..9587328de 100644 --- a/build/version +++ b/build/version @@ -3,10 +3,10 @@ # Build Version Information CONTAINER_BUILDVERSION_MAJOR=10 -CONTAINER_BUILDVERSION_MINOR=0 +CONTAINER_BUILDVERSION_MINOR=1 CONTAINER_BUILDVERSION_PATCH=0 -CONTAINER_BUILDVERSION_BUILDNR=4 -CONTAINER_BUILDVERSION_DATE=20200805 +CONTAINER_BUILDVERSION_BUILDNR=0 +CONTAINER_BUILDVERSION_DATE=20201005 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/build/windows/installer/conf/fluent-cri-parser.conf b/build/windows/installer/conf/fluent-cri-parser.conf new file mode 100644 index 000000000..86f1572ca --- /dev/null +++ b/build/windows/installer/conf/fluent-cri-parser.conf @@ -0,0 +1,6 @@ + + @type regexp + expression ^(? 
diff --git a/build/windows/installer/conf/fluent-docker-parser.conf b/build/windows/installer/conf/fluent-docker-parser.conf new file mode 100644 index 000000000..9dc800aeb --- /dev/null +++ b/build/windows/installer/conf/fluent-docker-parser.conf @@ -0,0 +1,5 @@ + + @type json + time_format %Y-%m-%dT%H:%M:%S.%NZ + keep_time_key true + diff --git a/build/windows/installer/conf/fluent.conf b/build/windows/installer/conf/fluent.conf index a4cacbcf6..c96300b1e 100644 --- a/build/windows/installer/conf/fluent.conf +++ b/build/windows/installer/conf/fluent.conf @@ -12,11 +12,8 @@ @log_level trace path_key tailed_path limit_recently_modified 5m - - @type json - time_format %Y-%m-%dT%H:%M:%S.%NZ - keep_time_key true - + # if the container runtime is non docker then this will be updated to fluent-cri-parser.conf during container startup + @include fluent-docker-parser.conf @@ -27,11 +24,8 @@ @log_level trace path_key tailed_path read_from_head true - - @type json - time_format %Y-%m-%dT%H:%M:%S.%NZ - keep_time_key true - + # if the container runtime is non docker then this will be updated to fluent-cri-parser.conf during container startup + @include fluent-docker-parser.conf @@ -59,13 +53,13 @@ - overflow_action throw_exception - chunk_limit_size 32k - queued_chunks_limit_size 256 - flush_interval 1 - flush_thread_interval 0.5 - flush_thread_burst_interval 0.01 - flush_thread_count 4 - retry_forever true - + overflow_action throw_exception + chunk_limit_size 32k + queued_chunks_limit_size 256 + flush_interval 1 + flush_thread_interval 0.5 + flush_thread_burst_interval 0.01 + flush_thread_count 4 + retry_forever true + diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 202494152..6d45b05d8 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in 
Kubernetes name: azuremonitor-containers -version: 2.7.4 +version: 2.7.7 kubeVersion: "^1.10.0-0" keywords: - monitoring @@ -28,7 +28,7 @@ keywords: - kubernetes - kuberneteshealth home: https://docs.microsoft.com/en-us/azure/monitoring/monitoring-container-health -icon: https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/img/azuremonitor-containers.svg +icon: https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/img/azuremonitor-containers.svg sources: - https://github.com/microsoft/Docker-Provider/tree/ci_prod maintainers: diff --git a/charts/azuremonitor-containers/templates/NOTES.txt b/charts/azuremonitor-containers/templates/NOTES.txt index 6179b6f1a..372cecb95 100644 --- a/charts/azuremonitor-containers/templates/NOTES.txt +++ b/charts/azuremonitor-containers/templates/NOTES.txt @@ -14,7 +14,7 @@ {{- end }} -{{- if and (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") }} +{{- if and (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") }} ############################################################################## #### ERROR: You did not provide cluster name #### @@ -22,7 +22,7 @@ {{- end }} -{{- if or (eq .Values.omsagent.secret.key "") (eq .Values.omsagent.secret.wsid "") (and (eq .Values.omsagent.env.clusterName "") (eq .Values.omsagent.env.clusterId ""))}} +{{- if or (eq .Values.omsagent.secret.key "") (eq .Values.omsagent.secret.wsid "") (and (eq .Values.omsagent.env.clusterName "") (eq .Values.omsagent.env.clusterId "") (eq .Values.Azure.Cluster.ResourceId "") )}} This deployment will not complete. 
To proceed, run helm upgrade {{ .Release.Name }} \ diff --git a/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml new file mode 100644 index 000000000..ebdd5ea3f --- /dev/null +++ b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml @@ -0,0 +1,9 @@ +{{- if or ( contains "microsoft.kubernetes/connectedclusters" (.Values.Azure.Cluster.ResourceId | lower) ) ( contains "microsoft.kubernetes/connectedclusters" (.Values.omsagent.env.clusterId | lower)) }} +apiVersion: clusterconfig.azure.com/v1beta1 +kind: AzureClusterIdentityRequest +metadata: + name: container-insights-clusteridentityrequest + namespace: azure-arc +spec: + audience: https://monitoring.azure.com/ +{{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-crd.yaml b/charts/azuremonitor-containers/templates/omsagent-crd.yaml index f4a028bd3..bbaf89a52 100644 --- a/charts/azuremonitor-containers/templates/omsagent-crd.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-crd.yaml @@ -1,3 +1,4 @@ +{{- if semverCompare "<1.19-0" .Capabilities.KubeVersion.GitVersion }} apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: @@ -10,3 +11,26 @@ spec: names: plural: healthstates kind: HealthState +{{- else }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: healthstates.azmon.container.insights + namespace: kube-system +spec: + group: azmon.container.insights + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + state: + type: string + scope: Namespaced + names: + plural: healthstates + kind: HealthState +{{- end }} \ No newline at end of file diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 0ea7a9af6..e65f9a98d 100644 --- 
a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -24,7 +24,16 @@ spec: agentVersion: {{ .Values.omsagent.image.tagWindows }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" + checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} + checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} spec: +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} + nodeSelector: + kubernetes.io/os: windows +{{- else }} + nodeSelector: + beta.kubernetes.io/os: windows +{{- end }} {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent {{- end }} @@ -46,6 +55,13 @@ spec: - name: AKS_REGION value: {{ .Values.omsagent.env.clusterRegion | quote }} {{- end }} + {{- else if ne .Values.Azure.Cluster.ResourceId "" }} + - name: AKS_RESOURCE_ID + value: {{ .Values.Azure.Cluster.ResourceId | quote }} + {{- if ne .Values.Azure.Cluster.Region "" }} + - name: AKS_REGION + value: {{ .Values.Azure.Cluster.Region | quote }} + {{- end }} {{- else }} - name: ACS_RESOURCE_NAME value: {{ .Values.omsagent.env.clusterName | quote }} @@ -53,6 +69,13 @@ spec: - name: CONTROLLER_TYPE value: "DaemonSet" - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers @@ -73,9 +96,6 @@ spec: - C:\opt\omsagentwindows\scripts\cmd\livenessProbe.cmd periodSeconds: 60 initialDelaySeconds: 180 - {{- with .Values.omsagent.daemonsetwindows.affinity }} - affinity: {{- toYaml . | nindent 8 }} - {{- end }} {{- with .Values.omsagent.tolerations }} tolerations: {{- toYaml . 
| nindent 8 }} {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index d6d6171cd..438294ce5 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -1,4 +1,4 @@ -{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId ""))}} +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} apiVersion: apps/v1 kind: DaemonSet metadata: @@ -24,6 +24,9 @@ spec: agentVersion: {{ .Values.omsagent.image.tag }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" + checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . 
| sha256sum }} + checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} + checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }} spec: {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent @@ -46,6 +49,13 @@ spec: - name: AKS_REGION value: {{ .Values.omsagent.env.clusterRegion | quote }} {{- end }} + {{- else if ne .Values.Azure.Cluster.ResourceId "" }} + - name: AKS_RESOURCE_ID + value: {{ .Values.Azure.Cluster.ResourceId | quote }} + {{- if ne .Values.Azure.Cluster.Region "" }} + - name: AKS_REGION + value: {{ .Values.Azure.Cluster.Region | quote }} + {{- end }} {{- else }} - name: ACS_RESOURCE_NAME value: {{ .Values.omsagent.env.clusterName | quote }} diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 6f8140eb6..8609d25c9 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -1,4 +1,4 @@ -{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId ""))}} +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} apiVersion: apps/v1 kind: Deployment metadata: @@ -25,6 +25,9 @@ spec: agentVersion: {{ .Values.omsagent.image.tag }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" + checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . 
| sha256sum }} + checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} + checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }} spec: {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent @@ -47,6 +50,13 @@ spec: - name: AKS_REGION value: {{ .Values.omsagent.env.clusterRegion | quote }} {{- end }} + {{- else if ne .Values.Azure.Cluster.ResourceId "" }} + - name: AKS_RESOURCE_ID + value: {{ .Values.Azure.Cluster.ResourceId | quote }} + {{- if ne .Values.Azure.Cluster.Region "" }} + - name: AKS_REGION + value: {{ .Values.Azure.Cluster.Region | quote }} + {{- end }} {{- else }} - name: ACS_RESOURCE_NAME value: {{ .Values.omsagent.env.clusterName | quote }} diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index 9903f41ff..4f7408e7c 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -27,8 +27,16 @@ rules: - apiGroups: ["azmon.container.insights"] resources: ["healthstates"] verbs: ["get", "create", "patch"] +- apiGroups: ["clusterconfig.azure.com"] + resources: ["azureclusteridentityrequests"] + resourceNames: ["container-insights-clusteridentityrequest"] + verbs: ["get", "create", "patch"] - nonResourceURLs: ["/metrics"] verbs: ["get"] +- apiGroups: [""] + resources: ["secrets"] + resourceNames: ["container-insights-clusteridentityrequest-token"] + verbs: ["get"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 diff --git a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml index c77fb12b4..475b17a46 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml @@ -1,4 +1,4 @@ -{{- if and (ne .Values.omsagent.secret.key "") (ne 
.Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId ""))}} +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} kind: ConfigMap apiVersion: v1 data: @@ -18,7 +18,7 @@ data: tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth #Kubernetes events @@ -70,14 +70,14 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info # custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions 
eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes log_level info diff --git a/charts/azuremonitor-containers/templates/omsagent-secret.yaml b/charts/azuremonitor-containers/templates/omsagent-secret.yaml index c6d992b82..1a7f087ed 100644 --- a/charts/azuremonitor-containers/templates/omsagent-secret.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-secret.yaml @@ -1,4 +1,4 @@ -{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId ""))}} +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} apiVersion: v1 kind: Secret metadata: diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 610e109ef..f841dc5d7 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -3,14 +3,19 @@ # Declare variables to be passed into your templates. 
## Microsoft OMS Agent image for kubernetes cluster monitoring -## ref: https://github.com/Microsoft/OMS-docker/tree/ci_feature_prod +## ref: https://github.com/microsoft/Docker-Provider/tree/ci_prod +## Values of ResourceId and Region under Azure->Cluster being populated by Azure Arc K8s RP during the installation of the extension +Azure: + Cluster: + Region: + ResourceId: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod08072020" - tagWindows: "win-ciprod08072020" + tag: "ciprod10052020" + tagWindows: "win-ciprod10052020" pullPolicy: IfNotPresent - dockerProviderVersion: "10.0.0-4" + dockerProviderVersion: "10.1.0-0" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. @@ -51,6 +56,17 @@ omsagent: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - labelSelector: + matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: type + operator: NotIn + values: + - virtual-kubelet nodeSelectorTerms: - labelSelector: matchExpressions: @@ -69,7 +85,7 @@ omsagent: nodeSelectorTerms: - labelSelector: matchExpressions: - - key: beta.kubernetes.io/os + - key: kubernetes.io/os operator: In values: - linux @@ -81,17 +97,21 @@ omsagent: operator: NotIn values: - master - daemonsetwindows: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - labelSelector: matchExpressions: - key: beta.kubernetes.io/os operator: In values: - - windows + - linux + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/role + operator: NotIn + values: + - master ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## diff --git a/img/azuremonitor-containers.svg 
b/img/azuremonitor-containers.svg new file mode 100644 index 000000000..b2f7c5323 --- /dev/null +++ b/img/azuremonitor-containers.svg @@ -0,0 +1,66 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index f3f442608..aec1bb456 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -42,6 +42,7 @@ data: # When the setting is set to false, only the kube events with !normal event type will be collected enabled = false # When this is enabled (enabled = true), all kube events including normal events will be collected + prometheus-data-collection-settings: |- # Custom Prometheus metrics data collection settings [prometheus_data_collection_settings.cluster] @@ -90,6 +91,15 @@ data: #fieldpass = ["metric_to_pass1", "metric_to_pass12"] #fielddrop = ["metric_to_drop"] + + metric_collection_settings: |- + # Metrics collection settings for metrics sent to Log Analytics and MDM + [metric_collection_settings.collect_kube_system_pv_metrics] + # In the absense of this configmap, default value for collect_kube_system_pv_metrics is false + # When the setting is set to false, only the persistent volume metrics outside the kube-system namespace will be collected + enabled = false + # When this is enabled (enabled = true), persistent volume metrics including those in the kube-system namespace will be collected + alertable-metrics-configuration-settings: |- # Alertable metrics configuration settings for container resource utilization [alertable_metrics_configuration_settings.container_resource_utilization_thresholds] @@ -100,6 +110,15 @@ data: container_memory_rss_threshold_percentage = 95.0 # Threshold for container memoryWorkingSet, metric will be sent only when memory working set exceeds or becomes equal to the following percentage 
container_memory_working_set_threshold_percentage = 95.0 + + # Alertable metrics configuration settings for persistent volume utilization + [alertable_metrics_configuration_settings.pv_utilization_thresholds] + # Threshold for persistent volume usage bytes, metric will be sent only when persistent volume utilization exceeds or becomes equal to the following percentage + pv_usage_threshold_percentage = 60.0 + integrations: |- + [integrations.azure_network_policy_manager] + collect_basic_metrics = false + collect_advanced_metrics = false metadata: name: container-azm-ms-agentconfig namespace: kube-system diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index bc27a5384..f4324a18a 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod08072020 +ARG IMAGE_TAG=ciprod10052020 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/linux/acrworkflows/acrdevnamespace.yaml b/kubernetes/linux/acrworkflows/acrdevnamespace.yaml index 9270be755..6a3617f6b 100644 --- a/kubernetes/linux/acrworkflows/acrdevnamespace.yaml +++ b/kubernetes/linux/acrworkflows/acrdevnamespace.yaml @@ -1,5 +1,5 @@ version: 1.0-preview-1 steps: - build: -t {{.Run.Registry}}/public/azuremonitor/containerinsights/cidev:{{.Run.Branch}}-{{.Run.Date}}-{{.Run.Commit | substr 0 7 }} . 
- workingDirectory: ci_feature - - push: ["{{.Run.Registry}}/public/azuremonitor/containerinsights/cidev:{{.Run.Branch}}-{{.Run.Date}}-{{.Run.Commit | substr 0 7 }}"] + workingDirectory: ci_dev + - push: ["{{.Run.Registry}}/public/azuremonitor/containerinsights/cidev:{{.Run.Branch}}-{{.Run.Date}}-{{.Run.Commit | substr 0 7 }}"] diff --git a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh index 982c8c491..267f15f32 100644 --- a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh +++ b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh @@ -127,7 +127,7 @@ baseDir=$(dirname $kubernetsDir) buildDir=$baseDir/build/linux dockerFileDir=$baseDir/kubernetes/linux -echo "sour code base directory: $baseDir" +echo "source code base directory: $baseDir" echo "build directory for docker provider: $buildDir" echo "docker file directory: $dockerFileDir" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 92f4977d6..11972f0f4 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -160,7 +160,7 @@ done source config_env_var -#Parse the configmap to set the right environment variables. +#Parse the configmap to set the right environment variables for health feature. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-health-config.rb cat health_config_env_var | while read line; do @@ -169,6 +169,15 @@ cat health_config_env_var | while read line; do done source health_config_env_var +#Parse the configmap to set the right environment variables for network policy manager (npm) integration. +/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb + +cat integration_npm_config_env_var | while read line; do + #echo $line + echo $line >> ~/.bashrc +done +source integration_npm_config_env_var + #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset if [ ! 
-e "/etc/config/kube.conf" ]; then /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb @@ -227,6 +236,14 @@ cat config_mdm_metrics_env_var | while read line; do done source config_mdm_metrics_env_var +#Parse the configmap to set the right environment variables for metric collection settings +/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb + +cat config_metric_collection_env_var | while read line; do + echo $line >> ~/.bashrc +done +source config_metric_collection_env_var + #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" #Defaults to use port 10255 @@ -283,11 +300,10 @@ fi echo "configured container runtime on kubelet is : "$CONTAINER_RUNTIME echo "export CONTAINER_RUNTIME="$CONTAINER_RUNTIME >> ~/.bashrc -# enable these metrics in next agent release -# export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="kubelet_runtime_operations_total" -# echo "export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC >> ~/.bashrc -# export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="kubelet_runtime_operations_errors_total" -# echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC >> ~/.bashrc +export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="kubelet_runtime_operations_total" +echo "export KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_TOTAL_METRIC >> ~/.bashrc +export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="kubelet_runtime_operations_errors_total" +echo "export KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC="$KUBELET_RUNTIME_OPERATIONS_ERRORS_TOTAL_METRIC >> ~/.bashrc # default to docker metrics export KUBELET_RUNTIME_OPERATIONS_METRIC="kubelet_docker_operations" diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 29533e678..18bc203d4 100644 --- a/kubernetes/omsagent.yaml +++ 
b/kubernetes/omsagent.yaml @@ -64,7 +64,7 @@ data: tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth #Kubernetes events @@ -117,15 +117,15 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast + custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info #custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast - metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes + custom_metrics_azure_regions 
eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth + metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes log_level info @@ -337,13 +337,13 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-4" + dockerProviderVersion: "10.1.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020" imagePullPolicy: IfNotPresent resources: limits: @@ -387,6 +387,13 @@ spec: name: host-log - mountPath: /var/lib/docker/containers name: containerlog-path + readOnly: true + - mountPath: /mnt/docker + name: containerlog-path-2 + readOnly: true + - mountPath: /mnt/containers + name: containerlog-path-3 + readOnly: true - mountPath: /etc/kubernetes/host name: azure-json-path - mountPath: /etc/omsagent-secret @@ -412,7 +419,8 @@ spec: nodeSelectorTerms: - labelSelector: matchExpressions: - - key: beta.kubernetes.io/os + # kubernetes.io/os label doesnt exist in k8s versions < 1.14 so make sure to choose label based on k8s version in aks yaml + - key: kubernetes.io/os operator: In values: - linux @@ -444,6 +452,12 @@ spec: - name: containerlog-path hostPath: path: /var/lib/docker/containers + - name: containerlog-path-2 + hostPath: + path: /mnt/docker + - name: containerlog-path-3 + hostPath: + path: /mnt/containers - name: azure-json-path hostPath: path: /etc/kubernetes @@ -480,13 +494,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-4" + dockerProviderVersion: "10.1.0-0" schema-versions: "v1" spec: serviceAccountName: 
omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020" imagePullPolicy: IfNotPresent resources: limits: @@ -528,8 +542,6 @@ spec: name: docker-sock - mountPath: /var/log name: host-log - - mountPath: /var/lib/docker/containers - name: containerlog-path - mountPath: /etc/kubernetes/host name: azure-json-path - mountPath: /etc/omsagent-secret @@ -588,9 +600,6 @@ spec: - name: host-log hostPath: path: /var/log - - name: containerlog-path - hostPath: - path: /var/lib/docker/containers - name: azure-json-path hostPath: path: /etc/kubernetes @@ -631,13 +640,13 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.0.0-4" + dockerProviderVersion: "10.1.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020" imagePullPolicy: IfNotPresent resources: limits: @@ -660,6 +669,10 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers @@ -733,14 +746,24 @@ spec: port: 25227 targetPort: in-rs-tcp --- -apiVersion: apiextensions.k8s.io/v1beta1 +# this is for versions >=1.19, for versions <1.19 we continue to use v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: healthstates.azmon.container.insights namespace: kube-system spec: group: azmon.container.insights - version: v1 + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + state: + type: string scope: Namespaced names: plural: healthstates diff --git a/kubernetes/windows/Dockerfile 
b/kubernetes/windows/Dockerfile index a18404772..c7dee60af 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod08072020 +ARG IMAGE_TAG=win-ciprod10052020 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement @@ -56,6 +56,9 @@ COPY ./omsagentwindows/out_oms.so /opt/omsagentwindows/out_oms.so # copy fluent, fluent-bit and out_oms conf files COPY ./omsagentwindows/installer/conf/fluent.conf /etc/fluent/ +# copy fluent docker and cri parser conf files +COPY ./omsagentwindows/installer/conf/fluent-cri-parser.conf /etc/fluent/ +COPY ./omsagentwindows/installer/conf/fluent-docker-parser.conf /etc/fluent/ COPY ./omsagentwindows/installer/conf/fluent-bit.conf /etc/fluent-bit COPY ./omsagentwindows/installer/conf/out_oms.conf /etc/omsagentwindows diff --git a/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 b/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 index 27be90d48..dbcfa6097 100644 --- a/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 +++ b/kubernetes/windows/dockerbuild/build-and-publish-docker-image.ps1 @@ -35,7 +35,7 @@ $imagerepo = $imageparts[0] if ($imagetag.StartsWith("win-") -eq $false) { Write-Host "adding win- prefix image tag since its not provided" - $imagetag = "win"-$imagetag + $imagetag = "win-$imagetag" } Write-Host "image tag used is :$imagetag" diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index b7ddfa8e7..2e8659601 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -1,34 +1,51 @@ -function Confirm-WindowsServiceExists($name) -{ - if (Get-Service $name -ErrorAction SilentlyContinue) +Add-Type @" + using System; + using System.Net; + using System.Net.Security; + using System.Security.Cryptography.X509Certificates; + public 
class ServerCertificateValidationCallback { + public static void Ignore() + { + ServicePointManager.ServerCertificateValidationCallback += + delegate + ( + Object obj, + X509Certificate certificate, + X509Chain chain, + SslPolicyErrors errors + ) + { + return true; + }; + } + } +"@ +function Confirm-WindowsServiceExists($name) { + if (Get-Service $name -ErrorAction SilentlyContinue) { return $true } return $false } -function Remove-WindowsServiceIfItExists($name) -{ +function Remove-WindowsServiceIfItExists($name) { $exists = Confirm-WindowsServiceExists $name - if ($exists) - { + if ($exists) { sc.exe \\server delete $name } } -function Start-FileSystemWatcher -{ +function Start-FileSystemWatcher { Start-Process powershell -NoNewWindow .\filesystemwatcher.ps1 } #register fluentd as a windows service -function Set-EnvironmentVariables -{ +function Set-EnvironmentVariables { $domain = "opinsights.azure.com" if (Test-Path /etc/omsagent-secret/DOMAIN) { # TODO: Change to omsagent-secret before merging - $domain = Get-Content /etc/omsagent-secret/DOMAIN + $domain = Get-Content /etc/omsagent-secret/DOMAIN } # Set DOMAIN @@ -38,7 +55,7 @@ function Set-EnvironmentVariables $wsID = "" if (Test-Path /etc/omsagent-secret/WSID) { # TODO: Change to omsagent-secret before merging - $wsID = Get-Content /etc/omsagent-secret/WSID + $wsID = Get-Content /etc/omsagent-secret/WSID } # Set DOMAIN @@ -48,7 +65,7 @@ function Set-EnvironmentVariables $wsKey = "" if (Test-Path /etc/omsagent-secret/KEY) { # TODO: Change to omsagent-secret before merging - $wsKey = Get-Content /etc/omsagent-secret/KEY + $wsKey = Get-Content /etc/omsagent-secret/KEY } # Set KEY @@ -58,7 +75,7 @@ function Set-EnvironmentVariables $proxy = "" if (Test-Path /etc/omsagent-secret/PROXY) { # TODO: Change to omsagent-secret before merging - $proxy = Get-Content /etc/omsagent-secret/PROXY + $proxy = Get-Content /etc/omsagent-secret/PROXY Write-Host "Validating the proxy configuration since proxy configuration 
provided" # valide the proxy endpoint configuration if (![string]::IsNullOrEmpty($proxy)) { @@ -66,26 +83,22 @@ function Set-EnvironmentVariables if (![string]::IsNullOrEmpty($proxy)) { $proxy = [string]$proxy.Trim(); $parts = $proxy -split "@" - if ($parts.Length -ne 2) - { + if ($parts.Length -ne 2) { Write-Host "Invalid ProxyConfiguration $($proxy). EXITING....." exit 1 } $subparts1 = $parts[0] -split "//" - if ($subparts1.Length -ne 2) - { + if ($subparts1.Length -ne 2) { Write-Host "Invalid ProxyConfiguration $($proxy). EXITING....." exit 1 } $protocol = $subparts1[0].ToLower().TrimEnd(":") - if (!($protocol -eq "http") -and !($protocol -eq "https")) - { + if (!($protocol -eq "http") -and !($protocol -eq "https")) { Write-Host "Unsupported protocol in ProxyConfiguration $($proxy). EXITING....." exit 1 } $subparts2 = $parts[1] -split ":" - if ($subparts2.Length -ne 2) - { + if ($subparts2.Length -ne 2) { Write-Host "Invalid ProxyConfiguration $($proxy). EXITING....." exit 1 } @@ -118,46 +131,175 @@ function Set-EnvironmentVariables .\setenv.ps1 } -function Start-Fluent -{ +function Get-ContainerRuntime { + # default container runtime and make default as containerd when containerd becomes default in AKS + $containerRuntime = "docker" + $response = "" + $NODE_IP = "" + try { + if (![string]::IsNullOrEmpty([System.Environment]::GetEnvironmentVariable("NODE_IP", "PROCESS"))) { + $NODE_IP = [System.Environment]::GetEnvironmentVariable("NODE_IP", "PROCESS") + } + elseif (![string]::IsNullOrEmpty([System.Environment]::GetEnvironmentVariable("NODE_IP", "USER"))) { + $NODE_IP = [System.Environment]::GetEnvironmentVariable("NODE_IP", "USER") + } + elseif (![string]::IsNullOrEmpty([System.Environment]::GetEnvironmentVariable("NODE_IP", "MACHINE"))) { + $NODE_IP = [System.Environment]::GetEnvironmentVariable("NODE_IP", "MACHINE") + } + + if (![string]::IsNullOrEmpty($NODE_IP)) { + $isPodsAPISuccess = $false + Write-Host "Value of NODE_IP environment variable : $($NODE_IP)" 
+ try { + Write-Host "Making API call to http://$($NODE_IP):10255/pods" + $response = Invoke-WebRequest -uri http://$($NODE_IP):10255/pods -UseBasicParsing + Write-Host "Response status code of API call to http://$($NODE_IP):10255/pods : $($response.StatusCode)" + } + catch { + Write-Host "API call to http://$($NODE_IP):10255/pods failed" + } + + if (![string]::IsNullOrEmpty($response) -and $response.StatusCode -eq 200) { + Write-Host "API call to http://$($NODE_IP):10255/pods succeeded" + $isPodsAPISuccess = $true + } + else { + try { + Write-Host "Making API call to https://$($NODE_IP):10250/pods" + # ignore certificate validation since kubelet uses self-signed cert + [ServerCertificateValidationCallback]::Ignore() + $response = Invoke-WebRequest -Uri https://$($NODE_IP):10250/pods -Headers @{'Authorization' = "Bearer $(Get-Content /var/run/secrets/kubernetes.io/serviceaccount/token)" } -UseBasicParsing + Write-Host "Response status code of API call to https://$($NODE_IP):10250/pods : $($response.StatusCode)" + if (![string]::IsNullOrEmpty($response) -and $response.StatusCode -eq 200) { + Write-Host "API call to https://$($NODE_IP):10250/pods succeeded" + $isPodsAPISuccess = $true + } + } + catch { + Write-Host "API call to https://$($NODE_IP):10250/pods failed" + } + } + + if ($isPodsAPISuccess) { + if (![string]::IsNullOrEmpty($response.Content)) { + $podList = $response.Content | ConvertFrom-Json + if (![string]::IsNullOrEmpty($podList)) { + $podItems = $podList.Items + if ($podItems.Length -gt 0) { + Write-Host "found pod items: $($podItems.Length)" + for ($index = 0; $index -le $podItems.Length ; $index++) { + Write-Host "current podItem index : $($index)" + $pod = $podItems[$index] + if (![string]::IsNullOrEmpty($pod) -and + ![string]::IsNullOrEmpty($pod.status) -and + ![string]::IsNullOrEmpty($pod.status.phase) -and + $pod.status.phase -eq "Running" -and + $pod.status.ContainerStatuses.Length -gt 0) { + $containerID = 
$pod.status.ContainerStatuses[0].containerID + $detectedContainerRuntime = $containerID.split(":")[0].trim() + Write-Host "detected containerRuntime as : $($detectedContainerRuntime)" + if (![string]::IsNullOrEmpty($detectedContainerRuntime) -and [string]$detectedContainerRuntime.StartsWith('docker') -eq $false) { + $containerRuntime = $detectedContainerRuntime + } + Write-Host "using containerRuntime as : $($containerRuntime)" + break + } + } + } + else { + Write-Host "got podItems count is 0 hence using default container runtime: $($containerRuntime)" + } + + + } + else { + Write-Host "got podList null or empty hence using default container runtime: $($containerRuntime)" + } + } + else { + Write-Host "got empty response content for /Pods API call hence using default container runtime: $($containerRuntime)" + } + } + } + else { + Write-Host "got empty NODE_IP environment variable" + } + # set CONTAINER_RUNTIME env for debug and telemetry purpose + [System.Environment]::SetEnvironmentVariable("CONTAINER_RUNTIME", $containerRuntime, "Process") + [System.Environment]::SetEnvironmentVariable("CONTAINER_RUNTIME", $containerRuntime, "Machine") + } + catch { + $e = $_.Exception + Write-Host $e + Write-Host "exception occured on getting container runtime hence using default container runtime: $($containerRuntime)" + } + + return $containerRuntime +} + +function Start-Fluent { + # Run fluent-bit service first so that we do not miss any logs being forwarded by the fluentd service. # Run fluent-bit as a background job. 
Switch this to a windows service once fluent-bit supports natively running as a windows service Start-Job -ScriptBlock { Start-Process -NoNewWindow -FilePath "C:\opt\fluent-bit\bin\fluent-bit.exe" -ArgumentList @("-c", "C:\etc\fluent-bit\fluent-bit.conf", "-e", "C:\opt\omsagentwindows\out_oms.so") } + $containerRuntime = Get-ContainerRuntime + #register fluentd as a service and start # there is a known issues with win32-service https://github.com/chef/win32-service/issues/70 + if (![string]::IsNullOrEmpty($containerRuntime) -and [string]$containerRuntime.StartsWith('docker') -eq $false) { + # change parser from docker to cri if the container runtime is not docker + Write-Host "changing parser from Docker to CRI since container runtime : $($containerRuntime) and which is non-docker" + (Get-Content -Path C:/etc/fluent/fluent.conf -Raw) -replace 'fluent-docker-parser.conf','fluent-cri-parser.conf' | Set-Content C:/etc/fluent/fluent.conf + } + fluentd --reg-winsvc i --reg-winsvc-auto-start --winsvc-name fluentdwinaks --reg-winsvc-fluentdopt '-c C:/etc/fluent/fluent.conf -o C:/etc/fluent/fluent.log' Notepad.exe | Out-Null } -function Generate-Certificates -{ +function Generate-Certificates { Write-Host "Generating Certificates" C:\\opt\\omsagentwindows\\certgenerator\\certificategenerator.exe } -function Test-CertificatePath -{ +function Bootstrap-CACertificates { + try { + # This is required when the root CA certs are different for some clouds. 
+ $caCerts=Invoke-WebRequest 'http://168.63.129.16/machine?comp=acmspackage&type=cacertificates&ext=json' -UseBasicParsing | ConvertFrom-Json + if (![string]::IsNullOrEmpty($caCerts)) { + $certificates = $caCerts.Certificates + for ($index = 0; $index -lt $certificates.Length ; $index++) { + $name=$certificates[$index].Name + $certificates[$index].CertBody > $name + Write-Host "name: $($name)" + Import-Certificate -FilePath .\$name -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose + } + } + } + catch { + $e = $_.Exception + Write-Host $e + Write-Host "exception occured in Bootstrap-CACertificates..." + } +} + +function Test-CertificatePath { $certLocation = $env:CI_CERT_LOCATION - $keyLocation = $env:CI_KEY_LOCATION - if (!(Test-Path $certLocation)) - { + $keyLocation = $env:CI_KEY_LOCATION + if (!(Test-Path $certLocation)) { Write-Host "Certificate file not found at $($certLocation). EXITING....." exit 1 } - else - { + else { Write-Host "Certificate file found at $($certLocation)" } - if (! (Test-Path $keyLocation)) - { + if (! (Test-Path $keyLocation)) { Write-Host "Key file not found at $($keyLocation). EXITING...." exit 1 } - else - { + else { Write-Host "Key file found at $($keyLocation)" } } @@ -167,12 +309,20 @@ Start-Transcript -Path main.txt Remove-WindowsServiceIfItExists "fluentdwinaks" Set-EnvironmentVariables Start-FileSystemWatcher + +#Bootstrapping CA certs for non public clouds and AKS clusters +$aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID") +if (![string]::IsNullOrEmpty($aksResourceId) -and $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) +{ + Bootstrap-CACertificates +} + Generate-Certificates Test-CertificatePath Start-Fluent # List all powershell processes running. 
This should have main.ps1 and filesystemwatcher.ps1 -Get-WmiObject Win32_process | Where-Object {$_.Name -match 'powershell'} | Format-Table -Property Name, CommandLine, ProcessId +Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | Format-Table -Property Name, CommandLine, ProcessId #check if fluentd service is running Get-Service fluentdwinaks diff --git a/scripts/cluster-creation/README.md b/scripts/cluster-creation/README.md new file mode 100644 index 000000000..57d0c5dbf --- /dev/null +++ b/scripts/cluster-creation/README.md @@ -0,0 +1,45 @@ +# Instructions to create k8s clusters + +## On-Prem K8s Cluster + +on-prem k8s cluster can be created on any VM or physical machine using kind. + +``` +bash onprem-k8s.sh --cluster-name +``` + +## AKS-Engine cluster + +aks-engine is unmanaged cluster in azure and you can use below command to create the cluster in azure. + +``` + +# Either you can reuse existing service principal or create one with below instructions +subscriptionId="" +az account set -s ${subscriptionId} +sp=$(az ad sp create-for-rbac --role="Contributor" --scopes="/subscriptions/${subscriptionId}") +# get the appId (i.e. clientid) and password (i.e. clientSecret) +echo $sp + +clientId=$(echo $sp | jq '.appId') +clientSecret=$(echo $sp | jq '.password') + +# create the aks-engine +bash aks-engine.sh --subscription-id "" --client-id "" --client-secret "" --dns-prefix "" --location "" +``` + +## ARO v4 Cluster + +Azure Redhat Openshift v4 cluster can be created with below command. + +> Note: Because of the cleanup policy on internal subscriptions, cluster creation can fail if you dont change cleanup service to none on the subnets of aro vnet before creation. +``` +bash aro-v4.sh --subscription-id "" --resource-group "" --cluster-name "" --location "" +``` +## Azure Arc K8s cluster + +you can connect on-prem k8s cluster or unmanaged k8s cluster such as aks-engine to azure through azure arc. 
+ +``` +bash arc-k8s-cluster.sh --subscription-id "" --resource-group "" --cluster-name "" --location "" --kube-context "" +``` diff --git a/scripts/cluster-creation/aks-engine.sh b/scripts/cluster-creation/aks-engine.sh new file mode 100644 index 000000000..9d287ea07 --- /dev/null +++ b/scripts/cluster-creation/aks-engine.sh @@ -0,0 +1,163 @@ +#!/bin/bash +set -e +TEMP_DIR=temp-$RANDOM +DEFAULT_ONPREM_K8S_CLUSTER="aks-engine-k8s-test" +AKS_ENGINE_VERSION="v0.54.0" + +download-aks-engine() +{ + sudo curl -LO https://github.com/Azure/aks-engine/releases/download/${AKS_ENGINE_VERSION}/aks-engine-v0.54.0-linux-amd64.tar.gz + sudo tar -xvf aks-engine-${AKS_ENGINE_VERSION}-linux-amd64.tar.gz + sudo mv aks-engine-${AKS_ENGINE_VERSION}-linux-amd64 aks-engine + sudo mv -f aks-engine/aks-engine /usr/local/bin +} + + +usage() +{ + local basename=`basename $0` + echo + echo "create aks-engine cluster:" + echo "$basename deploy --subscription-id --client-id --client-secret --dns-prefix --location " +} + +parse_args() +{ + + if [ $# -le 1 ] + then + usage + exit 1 + fi + +# Transform long options to short ones +for arg in "$@"; do + shift + case "$arg" in + "--subscription-id") set -- "$@" "-s" ;; + "--client-id") set -- "$@" "-c" ;; + "--client-secret") set -- "$@" "-w" ;; + "--dns-prefix") set -- "$@" "-d" ;; + "--location") set -- "$@" "-l" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" + esac +done + +local OPTIND opt + +while getopts 'hs:c:w:d:l:' opt; do + case "$opt" in + h) + usage + ;; + + s) + subscriptionId="$OPTARG" + echo "subscriptionId is $OPTARG" + ;; + + c) + clientId="$OPTARG" + echo "clientId is $OPTARG" + ;; + + w) + clientSecret="$OPTARG" + echo "clientSecret is $OPTARG" + ;; + + d) + dnsPrefix="$OPTARG" + echo "dnsPrefix is $OPTARG" + ;; + + l) + location="$OPTARG" + echo "location is $OPTARG" + ;; + + ?) 
+ usage + exit 1 + ;; + esac + done + shift "$(($OPTIND -1))" + + +} +create_cluster() +{ + +sudo touch kubernetes.json +sudo chmod 777 kubernetes.json +# For docker runtime, remove kubernetesConfig block +cat >> kubernetes.json < --resource-group --cluster-name --location --kube-context " +} + +parse_args() +{ + + if [ $# -le 1 ] + then + usage + exit 1 + fi + +# Transform long options to short ones +for arg in "$@"; do + shift + case "$arg" in + "--subscription-id") set -- "$@" "-s" ;; + "--resource-group") set -- "$@" "-r" ;; + "--cluster-name") set -- "$@" "-c" ;; + "--location") set -- "$@" "-l" ;; + "--kube-context") set -- "$@" "-k" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" + esac +done + +local OPTIND opt + +while getopts 'hs:r:c:l:k:' opt; do + case "$opt" in + h) + usage + ;; + + s) + subscriptionId="$OPTARG" + echo "subscriptionId is $OPTARG" + ;; + + r) + resourceGroupName="$OPTARG" + echo "resourceGroupName is $OPTARG" + ;; + + c) + clusterName="$OPTARG" + echo "clusterName is $OPTARG" + ;; + + l) + location="$OPTARG" + echo "location is $OPTARG" + ;; + + k) + kubecontext="$OPTARG" + echo "kubecontext is $OPTARG" + ;; + + ?) + usage + exit 1 + ;; + esac + done + shift "$(($OPTIND -1))" + + +} + +connect_azure_arc_k8s() +{ + + echo "create resource group: ${resourceGroupName} if it doenst exist" + isrgExists=$(az group exists -g ${resourceGroupName}) + if $isrgExists; then + echo "resource group: ${resourceGroupName} already exists" + else + echo "creating resource group ${resourceGroupName} in region since it doesnt exist" + az group create -l ${location} -n ${resourceGroupName} + fi + + echo "connecting k8s cluster with kube-context : ${kubecontext} to azure with clustername: ${clusterName} and resourcegroup: ${resourceGroupName} ..." 
+ az connectedk8s connect --name ${clusterName} --resource-group ${resourceGroupName} + echo "connecting k8s cluster with kube-context : ${kubecontext} to azure with clustername: ${clusterName} and resourcegroup: ${resourceGroupName} completed." +} + + + +echo "connecting k8s cluster to azure arc..." +echo "HELM version: ${HELM_VERSION}" +cd ~ +echo "creating temp directory":$TEMP_DIR +sudo mkdir $TEMP_DIR && cd $TEMP_DIR + +echo "validate args" +parse_args $@ + +echo "set the ${DefaultCloud} for azure cli" +az cloud set -n $DefaultCloud + +echo "login to azure cli" +az login --use-device-code + +echo "set the subscription ${subscriptionId} for cli" +az account set -s $subscriptionId + +echo "installing helm client ..." +install-helm +echo "installing helm client completed." + +echo "installing azure cli ..." +download-and-install-azure-cli +echo "installing azure cli completed." + +echo "installing arc k8s extensions and pre-requisistes ..." +install_arc_k8s_prerequisites +echo "installing arc k8s extensions and pre-requisites completed." + +echo "connecting cluster to azure arc k8s via azure arc " +connect_azure_arc_k8s +echo "connecting cluster to azure arc k8s via azure arc completed." + +echo "connecting k8s cluster to azure arc completed." 
diff --git a/scripts/cluster-creation/aro-v4.sh b/scripts/cluster-creation/aro-v4.sh new file mode 100644 index 000000000..8540ae931 --- /dev/null +++ b/scripts/cluster-creation/aro-v4.sh @@ -0,0 +1,146 @@ +#!/bin/bash +set -e +TEMP_DIR=temp-$RANDOM +DefaultCloud="AzureCloud" +DefaultVnetName="aro-net" +DefaultMasterSubnetName="master-subnet" +DefaultWorkerSubnetName="worker-subnet" + +download-and-install-azure-cli() +{ + # https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-apt?view=azure-cli-latest#install-with-one-command + sudo curl -sL https://aka.ms/InstallAzureCLIDeb | sudo bash +} + +register_aro_v4_provider() +{ + echo "register Microsoft.RedHatOpenShift provider" + az provider register -n Microsoft.RedHatOpenShift --wait +} + +usage() +{ + local basename=`basename $0` + echo + echo "create aro v4 cluster:" + echo "$basename --subscription-id --resource-group --cluster-name --location " +} + +parse_args() +{ + + if [ $# -le 1 ] + then + usage + exit 1 + fi + +# Transform long options to short ones +for arg in "$@"; do + shift + case "$arg" in + "--subscription-id") set -- "$@" "-s" ;; + "--resource-group") set -- "$@" "-r" ;; + "--cluster-name") set -- "$@" "-c" ;; + "--location") set -- "$@" "-l" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" + esac +done + +local OPTIND opt + +while getopts 'hs:r:c:l:' opt; do + case "$opt" in + h) + usage + ;; + + s) + subscriptionId="$OPTARG" + echo "subscriptionId is $OPTARG" + ;; + + r) + resourceGroupName="$OPTARG" + echo "resourceGroupName is $OPTARG" + ;; + + c) + clusterName="$OPTARG" + echo "clusterName is $OPTARG" + ;; + + l) + location="$OPTARG" + echo "location is $OPTARG" + ;; + + ?) 
+ usage + exit 1 + ;; + esac + done + shift "$(($OPTIND -1))" +} + +create_aro_v4_cluster() +{ + + echo "create resource group: ${resourceGroupName} if it doenst exist" + isrgExists=$(az group exists -g ${resourceGroupName}) + if $isrgExists; then + echo "resource group: ${resourceGroupName} already exists" + else + echo "creating resource group ${resourceGroupName} in region since it doesnt exist" + az group create -l ${location} -n ${resourceGroupName} + fi + + echo "creating virtual network" + az network vnet create --resource-group ${resourceGroupName} --name ${DefaultVnetName} --address-prefixes 10.0.0.0/22 + + echo "adding empty subnet for master nodes" + az network vnet subnet create --resource-group ${resourceGroupName} --vnet-name ${DefaultVnetName} --name ${DefaultMasterSubnetName} --address-prefixes 10.0.0.0/23 --service-endpoints Microsoft.ContainerRegistry + + echo "adding empty subnet for worker nodes" + az network vnet subnet create --resource-group ${resourceGroupName} --vnet-name ${DefaultVnetName} --name ${DefaultWorkerSubnetName} --address-prefixes 10.0.2.0/23 --service-endpoints Microsoft.ContainerRegistry + + echo "Please make sure disable to diable cleanup service on subnet nsgs of aor vnet for internal subscriptions" + sleep 1m + + echo "Disable subnet private endpoint policies on the master subnet" + az network vnet subnet update --name ${DefaultMasterSubnetName} --resource-group ${resourceGroupName} --vnet-name ${DefaultVnetName} --disable-private-link-service-network-policies true + + echo "creating ARO v4 cluster" + az aro create --resource-group ${resourceGroupName} --name ${clusterName} --vnet ${DefaultVnetName} --master-subnet ${DefaultMasterSubnetName} --worker-subnet ${DefaultWorkerSubnetName} + +} + + +echo "creating aro v4 cluster in specified azure subscription and resource group..." 
+cd ~ +echo "creating temp directory":$TEMP_DIR +sudo mkdir $TEMP_DIR && cd $TEMP_DIR + +echo "validate args" +parse_args $@ + +echo "set the ${DefaultCloud} for azure cli" +az cloud set -n $DefaultCloud + +echo "login to azure cli" +az login --use-device-code + +echo "set the subscription ${subscriptionId} for cli" +az account set -s $subscriptionId + +echo "installing azure cli ..." +download-and-install-azure-cli +echo "installing azure cli completed." + +echo "creating aro v4 cluster ..." +create_aro_v4_cluster +echo "creating aro v4 cluster completed." + +echo "creating aro v4 cluster in specified azure subscription and resource completed." diff --git a/scripts/cluster-creation/onprem-k8s.sh b/scripts/cluster-creation/onprem-k8s.sh new file mode 100755 index 000000000..147681133 --- /dev/null +++ b/scripts/cluster-creation/onprem-k8s.sh @@ -0,0 +1,106 @@ +#!/bin/bash +set -e +TEMP_DIR=temp-$RANDOM +KIND_VERSION="v0.8.1" + +install-kind() +{ +sudo curl -Lo ./kind https://kind.sigs.k8s.io/dl/${KIND_VERSION}/kind-linux-amd64 +sudo chmod +x ./kind +sudo mv ./kind /usr/local/bin/kind +} + +download_install_docker() +{ + echo "download docker script" + sudo curl -L https://get.docker.com/ -o get-docker.sh + echo "installing docker script" + sudo sh get-docker.sh + + echo "add user to docker group" + sudo usermod -aG docker $USER + +} + +create_cluster() +{ +sudo touch kind-config.yaml +sudo chmod 777 kind-config.yaml +cat >> kind-config.yaml < " +} + +parse_args() +{ + + if [ $# -le 1 ] + then + usage + exit 1 + fi + +# Transform long options to short ones +for arg in "$@"; do + shift + case "$arg" in + "--cluster-name") set -- "$@" "-c" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" + esac +done + +local OPTIND opt + +while getopts 'hc:' opt; do + case "$opt" in + h) + usage + ;; + + c) + clusterName="$OPTARG" + echo "clusterName is $OPTARG" + ;; + + ?) + usage + exit 1 + ;; + esac + done + shift "$(($OPTIND -1))" +} + +echo "creating kind k8 cluster ..." 
+echo "KIND version: ${KIND_VERSION}" +cd ~ +echo "creating temp directory":$TEMP_DIR +sudo mkdir $TEMP_DIR && cd $TEMP_DIR + +echo "parsing args" +parse_args $@ + +echo "download and install docker" +download_install_docker + +echo "download and install kind" +install-kind + +echo "creating cluster: ${clusterName}" +create_cluster + +echo "creating kind k8 cluster completed." diff --git a/scripts/onboarding/add-monitoring-metrics-publisher-role.md b/scripts/onboarding/add-monitoring-metrics-publisher-role.md index 822ff0f64..91b91d872 100644 --- a/scripts/onboarding/add-monitoring-metrics-publisher-role.md +++ b/scripts/onboarding/add-monitoring-metrics-publisher-role.md @@ -16,7 +16,7 @@ Of the built-in roles, only Owner and User Access Administrator are granted acce ### For single AKS cluster using Azure CLI ``` sh -curl -sL https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/docs/aks/mdmonboarding/mdm_onboarding.sh | bash -s +curl -sL https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aks/mdmonboarding/mdm_onboarding.sh | bash -s ``` The configuration change can take a few minutes to complete. When it finishes, you see a message similar to the following that includes the result: @@ -28,7 +28,7 @@ completed the role assignment ### For all AKS clusters in the specified subscription using Azure CLI ``` sh -curl -sL https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/docs/aks/mdmonboarding/mdm_onboarding_atscale.sh | bash -s +curl -sL https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aks/mdmonboarding/mdm_onboarding_atscale.sh | bash -s ``` The configuration change can take a few minutes to complete. 
When it finishes, you see a message similar to the following that includes the result: @@ -43,7 +43,7 @@ completed role assignments for all AKS clusters in subscription: /resourceGroups//providers/Microsoft.OperationalInsights/workspaces/" "clusterName of AKS-Engine cluster" +# https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s "name of the cloud" "00000000-0000-0000-0000-000000000000" "Resource Group Name of AKS-Engine cluster" "/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/" "clusterName of AKS-Engine cluster" # nameoftheCloud=${1} diff --git a/scripts/onboarding/attach-monitoring-tags.md b/scripts/onboarding/attach-monitoring-tags.md index f1c9a2e32..f7a802750 100644 --- a/scripts/onboarding/attach-monitoring-tags.md +++ b/scripts/onboarding/attach-monitoring-tags.md @@ -10,7 +10,7 @@ If you are not familiar with the concepts of azure resource tags (https://docs.m ## Attach tags using Powershell Get the below powershell script files to your local computer. - - Powershell script file [AddMonitoringWorkspaceTags.ps1](https://github.com/Microsoft/OMS-docker/blob/ci_feature/docs/aksengine/kubernetes/AddMonitoringWorkspaceTags.ps1) + - Powershell script file [AddMonitoringWorkspaceTags.ps1](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aksengine/kubernetes/AddMonitoringWorkspaceTags.ps1) - Refer for updating the Powershell execution policy (https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.security/set-executionpolicy?view=powershell-6) - Log analytics workspace resource Id can retrieved either Azure CLI or Powershell or Azure Portal Azure CLI @@ -50,14 +50,14 @@ The configuration change can take a few minutes to complete. 
When it finishes, y ``` sh -curl -sL https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature/docs/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s +curl -sL https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s Example for AKS-Engine clusters in Azure Public cloud -curl -sL https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature/docs/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s "AzureCloud" "00000000-0000-0000-0000-000000000000" "my-aks-engine-cluster-rg" "/subscriptions//resourceGroups/workspaceRg/providers/Microsoft.OperationalInsights/workspaces/workspaceName" "my-aks-engine-cluster" +curl -sL https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s "AzureCloud" "00000000-0000-0000-0000-000000000000" "my-aks-engine-cluster-rg" "/subscriptions//resourceGroups/workspaceRg/providers/Microsoft.OperationalInsights/workspaces/workspaceName" "my-aks-engine-cluster" Example for AKS-Engine clusters in Azure China cloud -curl -sL https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature/docs/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s "AzureChinaCloud" "00000000-0000-0000-0000-000000000000" "my-aks-engine-cluster-rg" "/subscriptions//resourceGroups/workspaceRg/providers/Microsoft.OperationalInsights/workspaces/workspaceName" "my-aks-engine-cluster" +curl -sL https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/aksengine/kubernetes/AddMonitoringOnboardingTags.sh | bash -s "AzureChinaCloud" "00000000-0000-0000-0000-000000000000" "my-aks-engine-cluster-rg" "/subscriptions//resourceGroups/workspaceRg/providers/Microsoft.OperationalInsights/workspaces/workspaceName" "my-aks-engine-cluster" ``` diff --git a/scripts/onboarding/hybrid/onboarding_azuremonitor_for_containers.sh 
b/scripts/onboarding/hybrid/onboarding_azuremonitor_for_containers.sh index b66dca67d..e2afa579d 100644 --- a/scripts/onboarding/hybrid/onboarding_azuremonitor_for_containers.sh +++ b/scripts/onboarding/hybrid/onboarding_azuremonitor_for_containers.sh @@ -151,7 +151,7 @@ echo "workspaceResourceId:"$workspaceResourceId echo "workspaceGuid:"$workspaceGuid echo "adding containerinsights solution to workspace" -solution=$(az group deployment create -g $defaultWorkspaceResourceGroup --template-uri https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature_prod/docs/templates/azuremonitor-containerSolution.json --parameters workspaceResourceId=$workspaceResourceId --parameters workspaceRegion=$workspaceRegion) +solution=$(az group deployment create -g $defaultWorkspaceResourceGroup --template-uri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json --parameters workspaceResourceId=$workspaceResourceId --parameters workspaceRegion=$workspaceRegion) echo "getting workspace primaryshared key" workspaceKey=$(az rest --method post --uri $workspaceResourceId/sharedKeys?api-version=2015-11-01-preview --query primarySharedKey) diff --git a/scripts/onboarding/managed/disable-monitoring.ps1 b/scripts/onboarding/managed/disable-monitoring.ps1 index 41ba2adb0..1c011bfff 100644 --- a/scripts/onboarding/managed/disable-monitoring.ps1 +++ b/scripts/onboarding/managed/disable-monitoring.ps1 @@ -1,18 +1,24 @@ <# .DESCRIPTION - Disables Azure Monitor for containers to monitoring enabled Azure Managed K8s cluster such as Azure Arc K8s, ARO v4 and AKS etc. + Disables Azure Monitor for containers to monitoring enabled Azure Managed K8s cluster such as Azure Arc enabled Kubernetes, ARO v4 and AKS etc. 1. Deletes the existing Azure Monitor for containers helm release 2. 
Deletes logAnalyticsWorkspaceResourceId tag on the provided Managed cluster .PARAMETER clusterResourceId - Id of the Azure Managed Cluster such as Azure ARC K8s, ARO v4 etc. + Id of the Azure Managed Cluster such as Azure Arc enabled Kubernetes, ARO v4 etc. + .PARAMETER servicePrincipalClientId + client Id of the service principal which will be used for the azure login + .PARAMETER servicePrincipalClientSecret + client secret of the service principal which will be used for the azure login + .PARAMETER tenantId + tenantId of the service principal which will be used for the azure login .PARAMETER kubeContext (optional) kube-context of the k8 cluster to install Azure Monitor for containers HELM chart Pre-requisites: - Azure Managed cluster Resource Id - - Contributor role permission on the Subscription of the Azure Arc Cluster + - Contributor role permission on the Subscription of the Azure Arc enabled Kubernetes Cluster - Helm v3.0.0 or higher https://github.com/helm/helm/releases - kube-context of the K8s cluster Note: 1. Please make sure you have all the pre-requisistes before running this script. 
@@ -22,6 +28,11 @@ param( [Parameter(mandatory = $true)] [string]$clusterResourceId, + [string]$servicePrincipalClientId, + [Parameter(mandatory = $false)] + [string]$servicePrincipalClientSecret, + [Parameter(mandatory = $false)] + [string]$tenantId, [Parameter(mandatory = $false)] [string]$kubeContext ) @@ -33,6 +44,7 @@ $helmChartName = "azuremonitor-containers" $isArcK8sCluster = $false $isAksCluster = $false $isAroV4Cluster = $false +$isUsingServicePrincipal = $false # checks the required Powershell modules exist and if not exists, request the user permission to install $azAccountModule = Get-Module -ListAvailable -Name Az.Accounts @@ -199,11 +211,24 @@ if ($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedcluster $isAroV4Cluster = $true } +if(([string]::IsNullOrEmpty($servicePrincipalClientId) -eq $false) -and + ([string]::IsNullOrEmpty($servicePrincipalClientSecret) -eq $false) -and + ([string]::IsNullOrEmpty($tenantId) -eq $false)) { + Write-Host("Using service principal creds for the azure login since provided.") + $isUsingServicePrincipal = $true + } + $resourceParts = $clusterResourceId.Split("/") $clusterSubscriptionId = $resourceParts[2] Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -ForegroundColor Green +if ($isUsingServicePrincipal) { + $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId +} + try { Write-Host("") Write-Host("Trying to get the current Az login context...") @@ -220,8 +245,15 @@ catch { if ($null -eq $account.Account) { try { - Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId + + if ($isUsingServicePrincipal) { + $spSecret = ConvertTo-SecureString -String 
$servicePrincipalClientSecret -AsPlainText -Force + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + } else { + Write-Host("Please login...") + Connect-AzAccount -subscriptionid $clusterSubscriptionId + } } catch { Write-Host("") @@ -266,7 +298,7 @@ if ($isArcK8sCluster -eq $true) { # validate identity $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() if ($clusterIdentity.Contains("systemassigned") -eq $false) { - Write-Host("Identity of Azure Arc K8s cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red + Write-Host("Identity of Azure Arc enabled Kubernetes cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red exit } } @@ -322,7 +354,3 @@ catch { } Write-Host("Successfully disabled Azure Monitor for containers for cluster: $clusteResourceId") -ForegroundColor Green - - - - diff --git a/scripts/onboarding/managed/disable-monitoring.sh b/scripts/onboarding/managed/disable-monitoring.sh index f55b4e617..c11426f30 100644 --- a/scripts/onboarding/managed/disable-monitoring.sh +++ b/scripts/onboarding/managed/disable-monitoring.sh @@ -14,7 +14,10 @@ # 1. disable monitoring using current kube-context # bash disable_monitoring.sh --resource-id/-r -# 2. disable monitoring using specific kube-context +# 2. disable monitoring using specific kube-context using service principal creds for the azure login +# bash disable_monitoring.sh --resource-id --client-id --client-secret --tenant-id + +# 3. 
disable monitoring using specific kube-context # bash disable_monitoring.sh --resource-id/-r --kube-context/-k @@ -23,10 +26,10 @@ set -o pipefail # default release name used during onboarding releaseName="azmon-containers-release-1" -# resource type for azure arc clusters +# resource type for Azure Arc enabled Kubernetes clusters resourceProvider="Microsoft.Kubernetes/connectedClusters" -# resource provider for azure arc connected cluster +# resource provider for Azure Arc enabled Kubernetes cluster arcK8sResourceProvider="Microsoft.Kubernetes/connectedClusters" # resource provider for azure redhat openshift v4 cluster aroV4ResourceProvider="Microsoft.RedHatOpenShift/OpenShiftClusters" @@ -48,12 +51,18 @@ isAroV4Cluster=false clusterResourceId="" kubeconfigContext="" +# sp details for the login if provided +servicePrincipalClientId="" +servicePrincipalClientSecret="" +servicePrincipalTenantId="" +isUsingServicePrincipal=false + usage() { local basename=`basename $0` echo echo "Disable Azure Monitor for containers:" - echo "$basename --resource-id/-r [--kube-context/-k ]" + echo "$basename --resource-id/-r [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context/-k ]" } delete_helm_release() @@ -105,19 +114,24 @@ remove_monitoring_tags() { echo "deleting monitoring tags ..." 
- echo "login to the azure interactively" - az login --use-device-code + if [ "$isUsingServicePrincipal" = true ] ; then + echo "login to the azure using provided service principal creds" + az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + else + echo "login to the azure interactively" + az login --use-device-code + fi echo "set the cluster subscription id: ${clusterSubscriptionId}" az account set -s ${clusterSubscriptionId} - # validate cluster identity for ARC k8s cluster + # validate cluster identity for Azure Arc enabled Kubernetes cluster if [ "$isArcK8sCluster" = true ] ; then identitytype=$(az resource show -g ${clusterResourceGroup} -n ${clusterName} --resource-type $resourceProvider --query identity.type) identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') echo "cluster identity type:" $identitytype if [[ "$identitytype" != "systemassigned" ]]; then - echo "-e only supported cluster identity is systemassigned for Azure ARC K8s cluster type" + echo "-e only supported cluster identity is systemassigned for Azure Arc enabled Kubernetes cluster type" exit 1 fi fi @@ -159,6 +173,9 @@ for arg in "$@"; do case "$arg" in "--resource-id") set -- "$@" "-r" ;; "--kube-context") set -- "$@" "-k" ;; + "--client-id") set -- "$@" "-c" ;; + "--client-secret") set -- "$@" "-s" ;; + "--tenant-id") set -- "$@" "-t" ;; "--help") set -- "$@" "-h" ;; "--"*) usage ;; *) set -- "$@" "$arg" @@ -167,7 +184,7 @@ done local OPTIND opt - while getopts 'hk:r:' opt; do + while getopts 'hk:c:s:t:r:' opt; do case "$opt" in h) usage @@ -183,6 +200,21 @@ done echo "clusterResourceId is $OPTARG" ;; + c) + servicePrincipalClientId="$OPTARG" + echo "servicePrincipalClientId is $OPTARG" + ;; + + s) + servicePrincipalClientSecret="$OPTARG" + echo "clientSecret is *****" + ;; + + t) + servicePrincipalTenantId="$OPTARG" + echo "service principal tenantId is $OPTARG" + ;; + ?) 
usage exit 1 @@ -225,7 +257,7 @@ done # detect the resource provider from the provider name in the cluster resource id if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then - echo "provider cluster resource is of Azure ARC K8s cluster type" + echo "provider cluster resource is of Azure Arc enabled Kubernetes cluster type" isArcK8sCluster=true resourceProvider=$arcK8sResourceProvider elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then @@ -241,6 +273,11 @@ done exit 1 fi + if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! -z "$servicePrincipalTenantId" ]; then + echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" + isUsingServicePrincipal=true + fi + } diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 346cdc81a..1e1669400 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -1,14 +1,20 @@ <# .DESCRIPTION - Onboards Azure Monitor for containers to Azure Managed Kuberenetes such as Azure Arc K8s, ARO v4 and AKS etc. + Onboards Azure Monitor for containers to Azure Managed Kuberenetes such as Azure Arc enabled Kubernetes, ARO v4 and AKS etc. 1. Creates the Default Azure log analytics workspace if doesn't exist one in specified subscription 2. Adds the ContainerInsights solution to the Azure log analytics workspace 3. Adds the workspaceResourceId tag or enable addon (if the cluster is AKS) on the provided Managed cluster resource id 4. Installs Azure Monitor for containers HELM chart to the K8s cluster in provided via --kube-context .PARAMETER clusterResourceId - Id of the Azure Managed Cluster such as Azure ARC K8s, ARO v4 etc. + Id of the Azure Managed Cluster such as Azure Arc enabled Kubernetes, ARO v4 etc. 
+ .PARAMETER servicePrincipalClientId + Client Id of the service principal which will be used for the azure login + .PARAMETER servicePrincipalClientSecret + Client secret of the service principal which will be used for the azure login + .PARAMETER tenantId + Azure TenantId of the service principal which will be used for the azure login .PARAMETER kubeContext (optional) kube-context of the k8 cluster to install Azure Monitor for containers HELM chart .PARAMETER workspaceResourceId (optional) @@ -16,10 +22,6 @@ .PARAMETER proxyEndpoint (optional) Provide Proxy endpoint if you have K8s cluster behind the proxy and would like to route Azure Monitor for containers outbound traffic via proxy. Format of the proxy endpoint should be http(s://:@: - .PARAMETER helmRepoName (optional) - helm repo name. should be used only for the private preview features - .PARAMETER helmRepoUrl (optional) - helm repo url. should be used only for the private preview features Pre-requisites: - Azure Managed cluster Resource Id @@ -34,33 +36,33 @@ param( [Parameter(mandatory = $true)] [string]$clusterResourceId, [Parameter(mandatory = $false)] - [string]$kubeContext, + [string]$servicePrincipalClientId, [Parameter(mandatory = $false)] - [string]$workspaceResourceId, + [string]$servicePrincipalClientSecret, [Parameter(mandatory = $false)] - [string]$proxyEndpoint, + [string]$tenantId, + [Parameter(mandatory = $false)] + [string]$kubeContext, [Parameter(mandatory = $false)] - [string]$helmRepoName, + [string]$workspaceResourceId, [Parameter(mandatory = $false)] - [string]$helmRepoUrl + [string]$proxyEndpoint ) -$solutionTemplateUri= "https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/templates/azuremonitor-containerSolution.json" +$solutionTemplateUri = "https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/templates/azuremonitor-containerSolution.json" $helmChartReleaseName = "azmon-containers-release-1" $helmChartName = 
"azuremonitor-containers" -$helmChartRepoName = "incubator" -$helmChartRepoUrl = "https://kubernetes-charts-incubator.storage.googleapis.com/" + # flags to indicate the cluster types $isArcK8sCluster = $false -$isAksCluster = $false - -if([string]::IsNullOrEmpty($helmRepoName) -eq $false){ - $helmChartRepoName = $helmRepoName -} +$isAksCluster = $false +$isUsingServicePrincipal = $false -if([string]::IsNullOrEmpty($helmRepoUrl) -eq $false){ - $helmChartRepoUrl = $helmRepoUrl -} +# released chart version in mcr +$mcr = "mcr.microsoft.com" +$mcrChartVersion = "2.7.6" +$mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" +$helmLocalRepoName = "." # checks the required Powershell modules exist and if not exists, request the user permission to install $azAccountModule = Get-Module -ListAvailable -Name Az.Accounts @@ -187,7 +189,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - } if ([string]::IsNullOrEmpty($clusterResourceId)) { - Write-Host("Specified Azure Arc ClusterResourceId should not be NULL or empty") -ForegroundColor Red + Write-Host("Specified Azure Arc enabled Kubernetes ClusterResourceId should not be NULL or empty") -ForegroundColor Red exit } @@ -207,23 +209,31 @@ if ($clusterResourceId.StartsWith("/") -eq $false) { $clusterResourceId = "/" + $clusterResourceId } -if ($clusterResourceId.Split("/").Length -ne 9){ - Write-Host("Provided Cluster Resource Id is not in expected format") -ForegroundColor Red +if ($clusterResourceId.Split("/").Length -ne 9) { + Write-Host("Provided Cluster Resource Id is not in expected format") -ForegroundColor Red exit } if (($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedclusters") -ne $true) -and ($clusterResourceId.ToLower().Contains("microsoft.redhatopenshift/openshiftclusters") -ne $true) -and ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -ne $true) - ) { +) { Write-Host("Provided cluster 
ResourceId is not supported cluster type: $clusterResourceId") -ForegroundColor Red exit } +if (([string]::IsNullOrEmpty($servicePrincipalClientId) -eq $false) -and + ([string]::IsNullOrEmpty($servicePrincipalClientSecret) -eq $false) -and + ([string]::IsNullOrEmpty($tenantId) -eq $false)) { + Write-Host("Using service principal creds for the azure login since these provided.") + $isUsingServicePrincipal = $true +} + if ($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedclusters") -eq $true) { - $isArcK8sCluster = $true -} elseif ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -eq $true) { - $isAksCluster = $true + $isArcK8sCluster = $true +} +elseif ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -eq $true) { + $isAksCluster = $true } $resourceParts = $clusterResourceId.Split("/") @@ -231,6 +241,12 @@ $clusterSubscriptionId = $resourceParts[2] Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -ForegroundColor Green +if ($isUsingServicePrincipal) { + $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId, $spSecret + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId +} + try { Write-Host("") Write-Host("Trying to get the current Az login context...") @@ -247,8 +263,15 @@ catch { if ($null -eq $account.Account) { try { - Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId + if ($isUsingServicePrincipal) { + $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId, $spSecret + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId 
-Subscription $clusterSubscriptionId + } + else { + Write-Host("Please login...") + Connect-AzAccount -subscriptionid $clusterSubscriptionId + } } catch { Write-Host("") @@ -290,12 +313,12 @@ if ($null -eq $clusterResource) { $clusterRegion = $clusterResource.Location.ToLower() if ($isArcK8sCluster -eq $true) { - # validate identity - $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() - if ($clusterIdentity.contains("systemassigned") -eq $false) { - Write-Host("Identity of Azure Arc K8s cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red - exit - } + # validate identity + $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() + if ($clusterIdentity.contains("systemassigned") -eq $false) { + Write-Host("Identity of Azure Arc enabled Kubernetes cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red + exit + } } if ([string]::IsNullOrEmpty($workspaceResourceId)) { @@ -482,7 +505,8 @@ if ($account.Subscription.Id -eq $clusterSubscriptionId) { if ($isAksCluster -eq $true) { Write-Host ("Enabling AKS Monitoring Addon ..") # TBD -} else { +} +else { Write-Host("Attaching workspaceResourceId tag on the cluster ResourceId") $clusterResource.Tags["logAnalyticsWorkspaceResourceId"] = $WorkspaceInformation.ResourceId Set-AzResource -Tag $clusterResource.Tags -ResourceId $clusterResource.ResourceId -Force @@ -494,20 +518,30 @@ Write-Host "Helm version" : $helmVersion Write-Host("Installing or upgrading if exists, Azure Monitor for containers HELM chart ...") try { - Write-Host("Adding $helmChartRepoName repo to helm: $helmChartRepoUrl") - helm repo add $helmChartRepoName $helmChartRepoUrl - Write-Host("updating helm repo to get latest version of charts") - helm repo update - $helmParameters = "omsagent.secret.wsid=$workspaceGUID,omsagent.secret.key=$workspacePrimarySharedKey,omsagent.env.clusterId=$clusterResourceId" - if([string]::IsNullOrEmpty($proxyEndpoint) 
-eq $false) { + Write-Host("pull the chart from mcr.microsoft.com") + [System.Environment]::SetEnvironmentVariable("HELM_EXPERIMENTAL_OCI", 1, "Process") + + Write-Host("pull the chart from mcr.microsoft.com") + helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} + + Write-Host("export the chart from local cache to current directory") + helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination . + + $helmChartRepoPath = "${helmLocalRepoName}" + "/" + "${helmChartName}" + + Write-Host("helmChartRepoPath is : ${helmChartRepoPath}") + + $helmParameters = "omsagent.secret.wsid=$workspaceGUID,omsagent.secret.key=$workspacePrimarySharedKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion" + if ([string]::IsNullOrEmpty($proxyEndpoint) -eq $false) { Write-Host("using proxy endpoint since its provided") $helmParameters = $helmParameters + ",omsagent.proxy=$proxyEndpoint" } if ([string]::IsNullOrEmpty($kubeContext)) { - helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoName/$helmChartName - } else { - Write-Host("using provided kube-context: $kubeContext") - helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoName/$helmChartName --kube-context $kubeContext + helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoPath + } + else { + Write-Host("using provided kube-context: $kubeContext") + helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoPath --kube-context $kubeContext } } catch { @@ -516,7 +550,3 @@ catch { Write-Host("Successfully enabled Azure Monitor for containers for cluster: $clusterResourceId") -ForegroundColor Green Write-Host("Proceed to https://aka.ms/azmon-containers to view your newly onboarded Azure Managed cluster") -ForegroundColor Green - - - - diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 
5a8e7e040..ce62a581a 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -20,17 +20,19 @@ # 1. Using Default Azure Log Analytics and no-proxy with current kube config context # bash enable-monitoring.sh --resource-id -# 2. Using Default Azure Log Analytics and no-proxy +# 2. Using Default Azure Log Analytics and no-proxy with current kube config context, and using service principal creds for the azure login +# bash enable-monitoring.sh --resource-id --client-id --client-secret --tenant-id + +# 3. Using Default Azure Log Analytics and no-proxy # bash enable-monitoring.sh --resource-id --kube-context -# 3. Using Default Azure Log Analytics and with proxy endpoint configuration +# 4. Using Default Azure Log Analytics and with proxy endpoint configuration # bash enable-monitoring.sh --resource-id --kube-context --proxy - -# 4. Using Existing Azure Log Analytics and no-proxy +# 5. Using Existing Azure Log Analytics and no-proxy # bash enable-monitoring.sh --resource-id --kube-context --workspace-id -# 5. Using Existing Azure Log Analytics and proxy +# 6. Using Existing Azure Log Analytics and proxy # bash enable-monitoring.sh --resource-id --kube-context --workspace-id --proxy set -e @@ -39,9 +41,11 @@ set -o pipefail # default to public cloud since only supported cloud is azure public clod defaultAzureCloud="AzureCloud" -# helm repo details -helmRepoName="incubator" -helmRepoUrl="https://kubernetes-charts-incubator.storage.googleapis.com/" +# released chart version in mcr +mcrChartVersion="2.7.6" +mcr="mcr.microsoft.com" +mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" +helmLocalRepoName="." 
helmChartName="azuremonitor-containers" # default release name used during onboarding @@ -56,19 +60,18 @@ aroV4ResourceProvider="Microsoft.RedHatOpenShift/OpenShiftClusters" # resource provider for aks cluster aksResourceProvider="Microsoft.ContainerService/managedClusters" -# default of resourceProvider is arc k8s and this will get updated based on the provider cluster resource +# default of resourceProvider is Azure Arc enabled Kubernetes and this will get updated based on the provider cluster resource resourceProvider="Microsoft.Kubernetes/connectedClusters" - # resource type for azure log analytics workspace workspaceResourceProvider="Microsoft.OperationalInsights/workspaces" # openshift project name for aro v4 cluster openshiftProjectName="azure-monitor-for-containers" -# arc k8s cluster resource +# AROv4 cluster resource isAroV4Cluster=false -# arc k8s cluster resource +# Azure Arc enabled Kubernetes cluster resource isArcK8sCluster=false # aks cluster resource @@ -95,142 +98,162 @@ workspaceResourceGroup="DefaultResourceGroup-"$workspaceRegionCode workspaceGuid="" workspaceKey="" -usage() -{ - local basename=`basename $0` - echo - echo "Enable Azure Monitor for containers:" - echo "$basename --resource-id [--kube-context ] [--workspace-id ] [--proxy ]" +# sp details for the login if provided +servicePrincipalClientId="" +servicePrincipalClientSecret="" +servicePrincipalTenantId="" +isUsingServicePrincipal=false + +usage() { + local basename=$(basename $0) + echo + echo "Enable Azure Monitor for containers:" + echo "$basename --resource-id [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context ] [--workspace-id ] [--proxy ]" } -parse_args() -{ +parse_args() { - if [ $# -le 1 ] - then + if [ $# -le 1 ]; then usage exit 1 - fi + fi -# Transform long options to short ones -for arg in "$@"; do - shift - case "$arg" in - "--resource-id") set -- "$@" "-r" ;; + # Transform long options to short ones + for arg in "$@"; do + shift + case "$arg" in + 
"--resource-id") set -- "$@" "-r" ;; "--kube-context") set -- "$@" "-k" ;; "--workspace-id") set -- "$@" "-w" ;; "--proxy") set -- "$@" "-p" ;; + "--client-id") set -- "$@" "-c" ;; + "--client-secret") set -- "$@" "-s" ;; + "--tenant-id") set -- "$@" "-t" ;; "--helm-repo-name") set -- "$@" "-n" ;; "--helm-repo-url") set -- "$@" "-u" ;; - "--"*) usage ;; - *) set -- "$@" "$arg" - esac -done + "--container-log-volume") set -- "$@" "-v" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" ;; + esac + done -local OPTIND opt + local OPTIND opt -while getopts 'hk:r:w:p:n:u:' opt; do + while getopts 'hk:r:w:p:c:s:t:n:u:v:' opt; do case "$opt" in - h) + h) usage - ;; - - k) - kubeconfigContext="$OPTARG" - echo "name of kube-context is $OPTARG" - ;; - - r) - clusterResourceId="$OPTARG" - echo "clusterResourceId is $OPTARG" - ;; - - w) - workspaceResourceId="$OPTARG" - echo "workspaceResourceId is $OPTARG" - ;; - - p) - proxyEndpoint="$OPTARG" - echo "proxyEndpoint is $OPTARG" - ;; - - n) - helmRepoName="$OPTARG" - echo "helm repo name is $OPTARG" - ;; - - u) - helmRepoUrl="$OPTARG" - echo "helm repo url is $OPTARG" - ;; - - v) - containerLogVolume="$OPTARG" - echo "container log volume is $OPTARG" - ;; - - ?) 
- usage - exit 1 - ;; + ;; + + k) + kubeconfigContext="$OPTARG" + echo "name of kube-context is $OPTARG" + ;; + + r) + clusterResourceId="$OPTARG" + echo "clusterResourceId is $OPTARG" + ;; + + w) + workspaceResourceId="$OPTARG" + echo "workspaceResourceId is $OPTARG" + ;; + + p) + proxyEndpoint="$OPTARG" + echo "proxyEndpoint is $OPTARG" + ;; + + c) + servicePrincipalClientId="$OPTARG" + echo "servicePrincipalClientId is $OPTARG" + ;; + + s) + servicePrincipalClientSecret="$OPTARG" + echo "clientSecret is *****" + ;; + + t) + servicePrincipalTenantId="$OPTARG" + echo "service principal tenantId is $OPTARG" + ;; + + n) + helmRepoName="$OPTARG" + echo "helm repo name is $OPTARG" + ;; + + u) + helmRepoUrl="$OPTARG" + echo "helm repo url is $OPTARG" + ;; + + v) + containerLogVolume="$OPTARG" + echo "container log volume is $OPTARG" + ;; + + ?) + usage + exit 1 + ;; esac done - shift "$(($OPTIND -1))" - + shift "$(($OPTIND - 1))" - local subscriptionId="$(echo ${clusterResourceId} | cut -d'/' -f3)" - local resourceGroup="$(echo ${clusterResourceId} | cut -d'/' -f5)" + local subscriptionId="$(echo ${clusterResourceId} | cut -d'/' -f3)" + local resourceGroup="$(echo ${clusterResourceId} | cut -d'/' -f5)" - # get resource parts and join back to get the provider name - local providerNameResourcePart1="$(echo ${clusterResourceId} | cut -d'/' -f7)" - local providerNameResourcePart2="$(echo ${clusterResourceId} | cut -d'/' -f8)" - local providerName="$(echo ${providerNameResourcePart1}/${providerNameResourcePart2} )" + # get resource parts and join back to get the provider name + local providerNameResourcePart1="$(echo ${clusterResourceId} | cut -d'/' -f7)" + local providerNameResourcePart2="$(echo ${clusterResourceId} | cut -d'/' -f8)" + local providerName="$(echo ${providerNameResourcePart1}/${providerNameResourcePart2})" - local clusterName="$(echo ${clusterResourceId} | cut -d'/' -f9)" + local clusterName="$(echo ${clusterResourceId} | cut -d'/' -f9)" - # convert to 
lowercase for validation - providerName=$(echo $providerName | tr "[:upper:]" "[:lower:]") + # convert to lowercase for validation + providerName=$(echo $providerName | tr "[:upper:]" "[:lower:]") - echo "cluster SubscriptionId:" $subscriptionId - echo "cluster ResourceGroup:" $resourceGroup - echo "cluster ProviderName:" $providerName - echo "cluster Name:" $clusterName + echo "cluster SubscriptionId:" $subscriptionId + echo "cluster ResourceGroup:" $resourceGroup + echo "cluster ProviderName:" $providerName + echo "cluster Name:" $clusterName - if [ -z "$subscriptionId" -o -z "$resourceGroup" -o -z "$providerName" -o -z "$clusterName" ]; then + if [ -z "$subscriptionId" -o -z "$resourceGroup" -o -z "$providerName" -o -z "$clusterName" ]; then echo "-e invalid cluster resource id. Please try with valid fully qualified resource id of the cluster" exit 1 - fi + fi - if [[ $providerName != microsoft.* ]]; then - echo "-e invalid azure cluster resource id format." - exit 1 - fi + if [[ $providerName != microsoft.* ]]; then + echo "-e invalid azure cluster resource id format." 
+ exit 1 + fi - # detect the resource provider from the provider name in the cluster resource id - # detect the resource provider from the provider name in the cluster resource id - if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then - echo "provider cluster resource is of Azure ARC K8s cluster type" + # detect the resource provider from the provider name in the cluster resource id + if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then + echo "provider cluster resource is of Azure Arc enabled Kubernetes cluster type" isArcK8sCluster=true resourceProvider=$arcK8sResourceProvider - elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then + elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then echo "provider cluster resource is of AROv4 cluster type" resourceProvider=$aroV4ResourceProvider isAroV4Cluster=true - elif [ $providerName = "microsoft.containerservice/managedclusters" ]; then + elif [ $providerName = "microsoft.containerservice/managedclusters" ]; then echo "provider cluster resource is of AKS cluster type" isAksCluster=true resourceProvider=$aksResourceProvider - else - echo "-e unsupported azure managed cluster type" - exit 1 - fi + else + echo "-e unsupported azure managed cluster type" + exit 1 + fi - if [ -z "$kubeconfigContext" ]; then + if [ -z "$kubeconfigContext" ]; then echo "using or getting current kube config context since --kube-context parameter not set " - fi + fi -if [ ! -z "$workspaceResourceId" ]; then + if [ ! -z "$workspaceResourceId" ]; then local workspaceSubscriptionId="$(echo $workspaceResourceId | cut -d'/' -f3)" local workspaceResourceGroup="$(echo $workspaceResourceId | cut -d'/' -f5)" local workspaceProviderName="$(echo $workspaceResourceId | cut -d'/' -f7)" @@ -242,13 +265,13 @@ if [ ! 
-z "$workspaceResourceId" ]; then echo "workspace ProviderName:" $workspaceName echo "workspace Name:" $workspaceName - if [[ $workspaceProviderName != microsoft.operationalinsights* ]]; then - echo "-e invalid azure log analytics resource id format." - exit 1 - fi -fi + if [[ $workspaceProviderName != microsoft.operationalinsights* ]]; then + echo "-e invalid azure log analytics resource id format." + exit 1 + fi + fi -if [ ! -z "$proxyEndpoint" ]; then + if [ ! -z "$proxyEndpoint" ]; then # Validate Proxy Endpoint URL # extract the protocol:// proto="$(echo $proxyEndpoint | grep :// | sed -e's,^\(.*://\).*,\1,g')" @@ -275,18 +298,21 @@ if [ ! -z "$proxyEndpoint" ]; then else echo "successfully validated provided proxy endpoint is valid and in expected format" fi -fi + fi + + if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! -z "$servicePrincipalTenantId" ]; then + echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" + isUsingServicePrincipal=true + fi } -configure_to_public_cloud() -{ +configure_to_public_cloud() { echo "Set AzureCloud as active cloud for az cli" az cloud set -n $defaultAzureCloud } -validate_cluster_identity() -{ +validate_cluster_identity() { echo "validating cluster identity" local rgName="$(echo ${1})" @@ -297,90 +323,89 @@ validate_cluster_identity() echo "cluster identity type:" $identitytype if [[ "$identitytype" != "systemassigned" ]]; then - echo "-e only supported cluster identity is systemassigned for Azure ARC K8s cluster type" - exit 1 + echo "-e only supported cluster identity is systemassigned for Azure Arc enabled Kubernetes cluster type" + exit 1 fi echo "successfully validated the identity of the cluster" } -create_default_log_analytics_workspace() -{ +create_default_log_analytics_workspace() { # extract subscription from cluster resource id local subscriptionId="$(echo $clusterResourceId | cut -d'/' -f3)" - local clusterRegion=$(az resource show 
--ids ${clusterResourceId} --query location) + local clusterRegion=$(az resource show --ids ${clusterResourceId} --query location -o tsv) + # convert cluster region to lower case + clusterRegion=$(echo $clusterRegion | tr "[:upper:]" "[:lower:]") echo "cluster region:" $clusterRegion # mapping fors for default Azure Log Analytics workspace declare -A AzureCloudLocationToOmsRegionCodeMap=( - [australiasoutheast]=ASE - [australiaeast]=EAU - [australiacentral]=CAU - [canadacentral]=CCA - [centralindia]=CIN - [centralus]=CUS - [eastasia]=EA - [eastus]=EUS - [eastus2]=EUS2 - [eastus2euap]=EAP - [francecentral]=PAR - [japaneast]=EJP - [koreacentral]=SE - [northeurope]=NEU - [southcentralus]=SCUS - [southeastasia]=SEA - [uksouth]=SUK - [usgovvirginia]=USGV - [westcentralus]=EUS - [westeurope]=WEU - [westus]=WUS - [westus2]=WUS2 + [australiasoutheast]=ASE + [australiaeast]=EAU + [australiacentral]=CAU + [canadacentral]=CCA + [centralindia]=CIN + [centralus]=CUS + [eastasia]=EA + [eastus]=EUS + [eastus2]=EUS2 + [eastus2euap]=EAP + [francecentral]=PAR + [japaneast]=EJP + [koreacentral]=SE + [northeurope]=NEU + [southcentralus]=SCUS + [southeastasia]=SEA + [uksouth]=SUK + [usgovvirginia]=USGV + [westcentralus]=EUS + [westeurope]=WEU + [westus]=WUS + [westus2]=WUS2 ) declare -A AzureCloudRegionToOmsRegionMap=( - [australiacentral]=australiacentral - [australiacentral2]=australiacentral - [australiaeast]=australiaeast - [australiasoutheast]=australiasoutheast - [brazilsouth]=southcentralus - [canadacentral]=canadacentral - [canadaeast]=canadacentral - [centralus]=centralus - [centralindia]=centralindia - [eastasia]=eastasia - [eastus]=eastus - [eastus2]=eastus2 - [francecentral]=francecentral - [francesouth]=francecentral - [japaneast]=japaneast - [japanwest]=japaneast - [koreacentral]=koreacentral - [koreasouth]=koreacentral - [northcentralus]=eastus - [northeurope]=northeurope - [southafricanorth]=westeurope - [southafricawest]=westeurope - [southcentralus]=southcentralus - 
[southeastasia]=southeastasia - [southindia]=centralindia - [uksouth]=uksouth - [ukwest]=uksouth - [westcentralus]=eastus - [westeurope]=westeurope - [westindia]=centralindia - [westus]=westus - [westus2]=westus2 + [australiacentral]=australiacentral + [australiacentral2]=australiacentral + [australiaeast]=australiaeast + [australiasoutheast]=australiasoutheast + [brazilsouth]=southcentralus + [canadacentral]=canadacentral + [canadaeast]=canadacentral + [centralus]=centralus + [centralindia]=centralindia + [eastasia]=eastasia + [eastus]=eastus + [eastus2]=eastus2 + [francecentral]=francecentral + [francesouth]=francecentral + [japaneast]=japaneast + [japanwest]=japaneast + [koreacentral]=koreacentral + [koreasouth]=koreacentral + [northcentralus]=eastus + [northeurope]=northeurope + [southafricanorth]=westeurope + [southafricawest]=westeurope + [southcentralus]=southcentralus + [southeastasia]=southeastasia + [southindia]=centralindia + [uksouth]=uksouth + [ukwest]=uksouth + [westcentralus]=eastus + [westeurope]=westeurope + [westindia]=centralindia + [westus]=westus + [westus2]=westus2 ) - if [ -n "${AzureCloudRegionToOmsRegionMap[$clusterRegion]}" ]; - then + if [ -n "${AzureCloudRegionToOmsRegionMap[$clusterRegion]}" ]; then workspaceRegion=${AzureCloudRegionToOmsRegionMap[$clusterRegion]} fi echo "Workspace Region:"$workspaceRegion - if [ -n "${AzureCloudLocationToOmsRegionCodeMap[$workspaceRegion]}" ]; - then + if [ -n "${AzureCloudLocationToOmsRegionCodeMap[$workspaceRegion]}" ]; then workspaceRegionCode=${AzureCloudLocationToOmsRegionCodeMap[$workspaceRegion]} fi echo "Workspace Region Code:"$workspaceRegionCode @@ -389,30 +414,28 @@ create_default_log_analytics_workspace() isRGExists=$(az group exists -g $workspaceResourceGroup) workspaceName="DefaultWorkspace-"$subscriptionId"-"$workspaceRegionCode - if $isRGExists - then echo "using existing default resource group:"$workspaceResourceGroup + if $isRGExists; then + echo "using existing default resource 
group:"$workspaceResourceGroup else echo "creating resource group: $workspaceResourceGroup in region: $workspaceRegion" az group create -g $workspaceResourceGroup -l $workspaceRegion fi - workspaceList=$(az resource list -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider) - if [ "$workspaceList" = "[]" ]; - then - # create new default workspace since no mapped existing default workspace - echo '{"location":"'"$workspaceRegion"'", "properties":{"sku":{"name": "standalone"}}}' > WorkspaceProps.json - cat WorkspaceProps.json - workspace=$(az resource create -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --is-full-object -p @WorkspaceProps.json) + workspaceList=$(az resource list -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider) + if [ "$workspaceList" = "[]" ]; then + # create new default workspace since no mapped existing default workspace + echo '{"location":"'"$workspaceRegion"'", "properties":{"sku":{"name": "standalone"}}}' >WorkspaceProps.json + cat WorkspaceProps.json + workspace=$(az resource create -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --is-full-object -p @WorkspaceProps.json) else echo "using existing default workspace:"$workspaceName fi - workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id) + workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id) workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') } -add_container_insights_solution() -{ +add_container_insights_solution() { local resourceId="$(echo ${1})" # extract resource group from workspace resource id @@ -422,10 +445,9 @@ add_container_insights_solution() solution=$(az deployment group create -g $resourceGroup --template-uri $solutionTemplateUri --parameters 
workspaceResourceId=$resourceId --parameters workspaceRegion=$workspaceRegion) } -get_workspace_guid_and_key() -{ +get_workspace_guid_and_key() { # extract resource parts from workspace resource id - local resourceId="$(echo ${1} | tr -d '"' )" + local resourceId="$(echo ${1} | tr -d '"')" local subId="$(echo ${resourceId} | cut -d'/' -f3)" local rgName="$(echo ${resourceId} | cut -d'/' -f5)" local wsName="$(echo ${resourceId} | cut -d'/' -f9)" @@ -440,11 +462,10 @@ get_workspace_guid_and_key() workspaceKey=$(echo $workspaceKey | tr -d '"') } -install_helm_chart() -{ +install_helm_chart() { - # get the config-context for ARO v4 cluster - if [ "$isAroV4Cluster" = true ] ; then + # get the config-context for ARO v4 cluster + if [ "$isAroV4Cluster" = true ]; then echo "getting config-context of ARO v4 cluster " echo "getting admin user creds for aro v4 cluster" adminUserName=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminUsername' -o tsv) @@ -456,74 +477,84 @@ install_helm_chart() oc new-project $openshiftProjectName echo "getting config-context of aro v4 cluster" kubeconfigContext=$(oc config current-context) - fi - - if [ -z "$kubeconfigContext" ]; then - echo "installing Azure Monitor for containers HELM chart on to the cluster and using current kube context ..." - else - echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." - fi - - echo "adding helm repo:" $helmRepoName - helm repo add $helmRepoName $helmRepoUrl - - echo "updating helm repo to get latest charts" - helm repo update - - if [ ! 
-z "$proxyEndpoint" ]; then - echo "using proxy endpoint since proxy configuration passed in" - if [ -z "$kubeconfigContext" ]; then - echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId $helmRepoName/$helmChartName - else - echo "using --kube-context:${kubeconfigContext} since passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId $helmRepoName/$helmChartName --kube-context ${kubeconfigContext} - fi - else - if [ -z "$kubeconfigContext" ]; then - echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId $helmRepoName/$helmChartName - else - echo "using --kube-context:${kubeconfigContext} since passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId $helmRepoName/$helmChartName --kube-context ${kubeconfigContext} - fi - fi - - echo "chart installation completed." + fi + + if [ -z "$kubeconfigContext" ]; then + echo "installing Azure Monitor for containers HELM chart on to the cluster and using current kube context ..." + else + echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." 
+ fi + + echo "getting the region of the cluster" + clusterRegion=$(az resource show --ids ${clusterResourceId} --query location -o tsv) + echo "cluster region is : ${clusterRegion}" + + echo "pull the chart version ${mcrChartVersion} from ${mcr}/${mcrChartRepoPath}" + export HELM_EXPERIMENTAL_OCI=1 + helm chart pull $mcr/$mcrChartRepoPath:$mcrChartVersion + + echo "export the chart from local cache to current directory" + helm chart export $mcr/$mcrChartRepoPath:$mcrChartVersion --destination . + + helmChartRepoPath=$helmLocalRepoName/$helmChartName + + echo "helm chart repo path: ${helmChartRepoPath}" + + if [ ! -z "$proxyEndpoint" ]; then + echo "using proxy endpoint since proxy configuration passed in" + if [ -z "$kubeconfigContext" ]; then + echo "using current kube-context since --kube-context/-k parameter not passed in" + helm upgrade --install $releaseName --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath + else + echo "using --kube-context:${kubeconfigContext} since passed in" + helm upgrade --install $releaseName --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} + fi + else + if [ -z "$kubeconfigContext" ]; then + echo "using current kube-context since --kube-context/-k parameter not passed in" + helm upgrade --install $releaseName --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath + else + echo "using --kube-context:${kubeconfigContext} since passed in" + helm upgrade --install $releaseName --set 
omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} + fi + fi + + echo "chart installation completed." } -login_to_azure() -{ - echo "login to the azure interactively" - az login --use-device-code +login_to_azure() { + if [ "$isUsingServicePrincipal" = true ]; then + echo "login to the azure using provided service principal creds" + az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + else + echo "login to the azure interactively" + az login --use-device-code + fi } -set_azure_subscription() -{ - local subscriptionId="$(echo ${1})" - echo "setting the subscription id: ${subscriptionId} as current subscription for the azure cli" - az account set -s ${subscriptionId} - echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" +set_azure_subscription() { + local subscriptionId="$(echo ${1})" + echo "setting the subscription id: ${subscriptionId} as current subscription for the azure cli" + az account set -s ${subscriptionId} + echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" } -attach_monitoring_tags() -{ +attach_monitoring_tags() { echo "attach loganalyticsworkspaceResourceId tag on to cluster resource" - status=$(az resource update --set tags.logAnalyticsWorkspaceResourceId=$workspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) + status=$(az resource update --set tags.logAnalyticsWorkspaceResourceId=$workspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) echo "$status" echo "successfully attached logAnalyticsWorkspaceResourceId tag on the cluster resource" } # enables aks monitoring addon for private preview and dont use this for aks 
prod -enable_aks_monitoring_addon() -{ - echo "getting cluster object" - clusterGetResponse=$(az rest --method get --uri $clusterResourceId?api-version=2020-03-01) - export jqquery=".properties.addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID=\"$workspaceResourceId\"" - echo $clusterGetResponse | jq $jqquery > putrequestbody.json - status=$(az rest --method put --uri $clusterResourceId?api-version=2020-03-01 --body @putrequestbody.json --headers Content-Type=application/json) - echo "status after enabling of aks monitoringa addon:$status" +enable_aks_monitoring_addon() { + echo "getting cluster object" + clusterGetResponse=$(az rest --method get --uri $clusterResourceId?api-version=2020-03-01) + export jqquery=".properties.addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID=\"$workspaceResourceId\"" + echo $clusterGetResponse | jq $jqquery >putrequestbody.json + status=$(az rest --method put --uri $clusterResourceId?api-version=2020-03-01 --body @putrequestbody.json --headers Content-Type=application/json) + echo "status after enabling of aks monitoringa addon:$status" } # parse and validate args @@ -544,9 +575,9 @@ login_to_azure # set the cluster subscription id as active sub for azure cli set_azure_subscription $clusterSubscriptionId -# validate cluster identity if its ARC k8s cluster -if [ "$isArcK8sCluster" = true ] ; then - validate_cluster_identity $clusterResourceGroup $clusterName +# validate cluster identity if its Azure Arc enabled Kubernetes cluster +if [ "$isArcK8sCluster" = true ]; then + validate_cluster_identity $clusterResourceGroup $clusterName fi if [ -z $workspaceResourceId ]; then @@ -555,7 +586,7 @@ if [ -z $workspaceResourceId ]; then else echo "using provided azure log analytics workspace:${workspaceResourceId}" workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') - workspaceSubscriptionId="$(echo ${workspaceResourceId} | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]" )" + workspaceSubscriptionId="$(echo 
${workspaceResourceId} | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" workspaceResourceGroup="$(echo ${workspaceResourceId} | cut -d'/' -f5)" workspaceName="$(echo ${workspaceResourceId} | cut -d'/' -f9)" @@ -577,13 +608,13 @@ add_container_insights_solution $workspaceResourceId # get workspace guid and key get_workspace_guid_and_key $workspaceResourceId -if [ "$isClusterAndWorkspaceInSameSubscription" = true ] ; then +if [ "$isClusterAndWorkspaceInSameSubscription" = false ]; then echo "switch to cluster subscription id as active subscription for cli: ${clusterSubscriptionId}" set_azure_subscription $clusterSubscriptionId fi # attach monitoring tags on to cluster resource -if [ "$isAksCluster" = true ] ; then +if [ "$isAksCluster" = true ]; then enable_aks_monitoring_addon else attach_monitoring_tags diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh new file mode 100644 index 000000000..8a12b2f02 --- /dev/null +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -0,0 +1,314 @@ +#!/bin/bash +# +# Execute this directly in Azure Cloud Shell (https://shell.azure.com) by pasting (SHIFT+INS on Windows, CTRL+V on Mac or Linux) +# the following line (beginning with curl...) at the command prompt and then replacing the args: +# This scripts upgrades the existing Azure Monitor for containers release on Azure Arc enabled Kubernetes cluster +# +# 1. Upgrades existing Azure Monitor for containers release to the K8s cluster in provided via --kube-context +# Prerequisites : +# Azure CLI: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest +# Helm3 : https://helm.sh/docs/intro/install/ + +# download script +# curl -o enable-monitoring.sh -L https://aka.ms/upgrade-monitoring-bash-script +# 1. Using Service Principal for Azure Login +## bash upgrade-monitoring.sh --client-id --client-secret --tenant-id +# 2. 
Using Interactive device login +# bash upgrade-monitoring.sh --resource-id + +set -e +set -o pipefail + +# released chart version for Azure Arc enabled Kubernetes public preview +mcrChartVersion="2.7.6" +mcr="mcr.microsoft.com" +mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" + +# default to public cloud since only supported cloud is azure public clod +defaultAzureCloud="AzureCloud" +helmLocalRepoName="." +helmChartName="azuremonitor-containers" + +# default release name used during onboarding +releaseName="azmon-containers-release-1" + +# resource provider for azure arc connected cluster +arcK8sResourceProvider="Microsoft.Kubernetes/connectedClusters" + +# default of resourceProvider is Azure Arc enabled Kubernetes and this will get updated based on the provider cluster resource +resourceProvider="Microsoft.Kubernetes/connectedClusters" + +# Azure Arc enabled Kubernetes cluster resource +isArcK8sCluster=false + +# openshift project name for aro v4 cluster +openshiftProjectName="azure-monitor-for-containers" + +# Azure Arc enabled Kubernetes cluster resource +isAroV4Cluster=false + +# default global params +clusterResourceId="" +kubeconfigContext="" + +# default workspace region and code +workspaceRegion="eastus" +workspaceRegionCode="EUS" +workspaceResourceGroup="DefaultResourceGroup-"$workspaceRegionCode + +# default workspace guid and key +workspaceGuid="" +workspaceKey="" + +# sp details for the login if provided +servicePrincipalClientId="" +servicePrincipalClientSecret="" +servicePrincipalTenantId="" +isUsingServicePrincipal=false + +usage() { + local basename=$(basename $0) + echo + echo "Upgrade Azure Monitor for containers:" + echo "$basename --resource-id [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context ]" +} + +parse_args() { + + if [ $# -le 1 ]; then + usage + exit 1 + fi + + # Transform long options to short ones + for arg in "$@"; do + shift + case "$arg" in + "--resource-id") set -- "$@" "-r" ;; + 
"--kube-context") set -- "$@" "-k" ;; + "--client-id") set -- "$@" "-c" ;; + "--client-secret") set -- "$@" "-s" ;; + "--tenant-id") set -- "$@" "-t" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" ;; + esac + done + + local OPTIND opt + + while getopts 'hk:r:c:s:t:' opt; do + case "$opt" in + h) + usage + ;; + + k) + kubeconfigContext="$OPTARG" + echo "name of kube-context is $OPTARG" + ;; + + r) + clusterResourceId="$OPTARG" + echo "clusterResourceId is $OPTARG" + ;; + + c) + servicePrincipalClientId="$OPTARG" + echo "servicePrincipalClientId is $OPTARG" + ;; + + s) + servicePrincipalClientSecret="$OPTARG" + echo "clientSecret is *****" + ;; + + t) + servicePrincipalTenantId="$OPTARG" + echo "service principal tenantId is $OPTARG" + ;; + + ?) + usage + exit 1 + ;; + esac + done + shift "$(($OPTIND - 1))" + + local subscriptionId="$(echo ${clusterResourceId} | cut -d'/' -f3)" + local resourceGroup="$(echo ${clusterResourceId} | cut -d'/' -f5)" + + # get resource parts and join back to get the provider name + local providerNameResourcePart1="$(echo ${clusterResourceId} | cut -d'/' -f7)" + local providerNameResourcePart2="$(echo ${clusterResourceId} | cut -d'/' -f8)" + local providerName="$(echo ${providerNameResourcePart1}/${providerNameResourcePart2})" + + local clusterName="$(echo ${clusterResourceId} | cut -d'/' -f9)" + + # convert to lowercase for validation + providerName=$(echo $providerName | tr "[:upper:]" "[:lower:]") + + echo "cluster SubscriptionId:" $subscriptionId + echo "cluster ResourceGroup:" $resourceGroup + echo "cluster ProviderName:" $providerName + echo "cluster Name:" $clusterName + + if [ -z "$subscriptionId" -o -z "$resourceGroup" -o -z "$providerName" -o -z "$clusterName" ]; then + echo "-e invalid cluster resource id. Please try with valid fully qualified resource id of the cluster" + exit 1 + fi + + if [[ $providerName != microsoft.* ]]; then + echo "-e invalid azure cluster resource id format." 
+ exit 1 + fi + + # detect the resource provider from the provider name in the cluster resource id + if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then + echo "provider cluster resource is of Azure Arc enabled Kubernetes cluster type" + isArcK8sCluster=true + resourceProvider=$arcK8sResourceProvider + elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then + echo "provider cluster resource is of AROv4 cluster type" + resourceProvider=$aroV4ResourceProvider + isAroV4Cluster=true + elif [ $providerName = "microsoft.containerservice/managedclusters" ]; then + echo "provider cluster resource is of AKS cluster type" + isAksCluster=true + resourceProvider=$aksResourceProvider + else + echo "-e unsupported azure managed cluster type" + exit 1 + fi + + if [ -z "$kubeconfigContext" ]; then + echo "using or getting current kube config context since --kube-context parameter not set " + fi + + if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! 
-z "$servicePrincipalTenantId" ]; then + echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" + isUsingServicePrincipal=true + fi +} + +configure_to_public_cloud() { + echo "Set AzureCloud as active cloud for az cli" + az cloud set -n $defaultAzureCloud +} + +validate_cluster_identity() { + echo "validating cluster identity" + + local rgName="$(echo ${1})" + local clusterName="$(echo ${2})" + + local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type) + identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') + echo "cluster identity type:" $identitytype + + if [[ "$identitytype" != "systemassigned" ]]; then + echo "-e only supported cluster identity is systemassigned for Azure Arc enabled Kubernetes cluster type" + exit 1 + fi + + echo "successfully validated the identity of the cluster" +} + +validate_monitoring_tags() { + echo "get loganalyticsworkspaceResourceId tag on to cluster resource" + logAnalyticsWorkspaceResourceIdTag=$(az resource show --query tags.logAnalyticsWorkspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) + echo "configured log analytics workspace: ${logAnalyticsWorkspaceResourceIdTag}" + echo "successfully got logAnalyticsWorkspaceResourceId tag on the cluster resource" + if [ -z "$logAnalyticsWorkspaceResourceIdTag" ]; then + echo "-e logAnalyticsWorkspaceResourceId doesnt exist on this cluster which indicates cluster not enabled for monitoring" + exit 1 + fi +} + + +upgrade_helm_chart_release() { + + # get the config-context for ARO v4 cluster + if [ "$isAroV4Cluster" = true ]; then + echo "getting config-context of ARO v4 cluster " + echo "getting admin user creds for aro v4 cluster" + adminUserName=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminUsername' -o tsv) + adminPassword=$(az aro list-credentials -g $clusterResourceGroup 
-n $clusterName --query 'kubeadminPassword' -o tsv) + apiServer=$(az aro show -g $clusterResourceGroup -n $clusterName --query apiserverProfile.url -o tsv) + echo "login to the cluster via oc login" + oc login $apiServer -u $adminUserName -p $adminPassword + echo "creating project azure-monitor-for-containers" + oc new-project $openshiftProjectName + echo "getting config-context of aro v4 cluster" + kubeconfigContext=$(oc config current-context) + fi + + if [ -z "$kubeconfigContext" ]; then + echo "installing Azure Monitor for containers HELM chart on to the cluster and using current kube context ..." + else + echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." + fi + + export HELM_EXPERIMENTAL_OCI=1 + + echo "pull the chart from ${mcr}/${mcrChartRepoPath}:${mcrChartVersion}" + helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} + + echo "export the chart from local cache to current directory" + helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination . + + helmChartRepoPath=$helmLocalRepoName/$helmChartName + + echo "upgrading the release: $releaseName to chart version : ${mcrChartVersion}" + helm get values $releaseName -o yaml | helm upgrade --install $releaseName $helmChartRepoPath -f - + echo "$releaseName got upgraded successfully." 
+} + +login_to_azure() { + if [ "$isUsingServicePrincipal" = true ]; then + echo "login to the azure using provided service principal creds" + az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + else + echo "login to the azure interactively" + az login --use-device-code + fi +} + +set_azure_subscription() { + local subscriptionId="$(echo ${1})" + echo "setting the subscription id: ${subscriptionId} as current subscription for the azure cli" + az account set -s ${subscriptionId} + echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" +} + +# parse and validate args +parse_args $@ + +# configure azure cli for public cloud +configure_to_public_cloud + +# parse cluster resource id +clusterSubscriptionId="$(echo $clusterResourceId | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" +clusterResourceGroup="$(echo $clusterResourceId | cut -d'/' -f5)" +providerName="$(echo $clusterResourceId | cut -d'/' -f7)" +clusterName="$(echo $clusterResourceId | cut -d'/' -f9)" + +# login to azure +login_to_azure + +# set the cluster subscription id as active sub for azure cli +set_azure_subscription $clusterSubscriptionId + +# validate cluster identity if its Azure Arc enabled Kubernetes cluster +if [ "$isArcK8sCluster" = true ]; then + validate_cluster_identity $clusterResourceGroup $clusterName +fi + +# validate the cluster has monitoring tags +validate_monitoring_tags + +# upgrade helm chart release +upgrade_helm_chart_release + +# portal link +echo "Proceed to https://aka.ms/azmon-containers to view health of your newly onboarded cluster" diff --git a/scripts/onboarding/solution-onboarding.md b/scripts/onboarding/solution-onboarding.md index 045738762..13e76530d 100644 --- a/scripts/onboarding/solution-onboarding.md +++ b/scripts/onboarding/solution-onboarding.md @@ -6,8 +6,8 @@ You can either use the Azure Powershell or Azure cli to deploy 
the solution. If you are not familiar with the concepts of deploying resources using a template with PowerShell, see [Deploy resources with Resource Manager templates and Azure PowerShell](https://review.docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-deploy) 1. Get the below template files to your local computer. - - Template file [azuremonitor-containerSolution.json](https://github.com/Microsoft/OMS-docker/blob/ci_feature_prod/docs/templates/azuremonitor-containerSolution.json) - - TemplateParams file [azuremonitor-containerSolutionParams.json](https://github.com/Microsoft/OMS-docker/blob/ci_feature_prod/docs/templates/azuremonitor-containerSolutionParams.json) + - Template file [azuremonitor-containerSolution.json](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json) + - TemplateParams file [azuremonitor-containerSolutionParams.json](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolutionParams.json) 2. Edit the TemplateParams file in your local computer. 
* workspaceResourceId parameter : - Replace `` with Azure subscriptionID for your Workspace diff --git a/scripts/preview/health/HealthAgentOnboarding.ps1 b/scripts/preview/health/HealthAgentOnboarding.ps1 index 881dd2549..9ce8eca74 100644 --- a/scripts/preview/health/HealthAgentOnboarding.ps1 +++ b/scripts/preview/health/HealthAgentOnboarding.ps1 @@ -339,7 +339,7 @@ if ($false -eq $isSolutionOnboarded) { try { New-AzResourceGroupDeployment -Name $DeploymentName ` -ResourceGroupName $workspaceResourceGroupName ` - -TemplateUri https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/docs/templates/azuremonitor-containerSolution.json ` + -TemplateUri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json ` -TemplateParameterObject $Parameters -ErrorAction Stop` diff --git a/scripts/troubleshoot/README.md b/scripts/troubleshoot/README.md index d4e2e9cf4..5ffa07639 100644 --- a/scripts/troubleshoot/README.md +++ b/scripts/troubleshoot/README.md @@ -7,7 +7,7 @@ The table below summarizes known issues you may face while using Azure Monitor f | ---- | --- | | Error Message `No data for selected filters` | It may take some time to establish monitoring data flow for newly created clusters. Please allow at least 10-15 minutes for data to appear for your cluster. | | Error Message `Error retrieving data` | While Azure Kubenetes Service cluster is setting up for health and performance monitoring, a connection is established between the cluster and Azure Log Analytics workspace. Log Analytics workspace is used to store all monitoring data for your cluster. This error may occurr when your Log Analytics workspace has been deleted or lost. Please check whether your Log Analytics workspace is available. To find your Log Analytics workspace go [here.](https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-manage-access) and your workspace is available. 
If the workspace is missing, you will need to re-onboard Container Health to your cluster. To re-onboard, you will need to [opt out](https://docs.microsoft.com/en-us/azure/azure-monitor/insights/container-insights-optout) of monitoring for the cluster and [onboard](https://docs.microsoft.com/en-us/azure/azure-monitor/insights/container-insights-enable-existing-clusters) again to Container Health. | -| `Error retrieving data` after adding Container Health through az aks cli | When onboarding using az aks cli, very seldom, Container Health may not be properly onboarded. Please check whether the Container Insights Solution is onboarded. To do this, go to your [Log Analytics workspace](https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-manage-access) and see if Container Insights Solution is available by going to the "Solutions" tab under General. To resolve this issue, you will need to redeploy the Container Insights Solution. Please follow the instructions on [how to deploy Azure Monitor - container health solution to your Log Analytics workspace. ](https://github.com/Microsoft/OMS-docker/blob/ci_feature_prod/docs/solution-onboarding.md) | +| `Error retrieving data` after adding Container Health through az aks cli | When onboarding using az aks cli, very seldom, Container Health may not be properly onboarded. Please check whether the Container Insights Solution is onboarded. To do this, go to your [Log Analytics workspace](https://docs.microsoft.com/en-us/azure/log-analytics/log-analytics-manage-access) and see if Container Insights Solution is available by going to the "Solutions" tab under General. To resolve this issue, you will need to redeploy the Container Insights Solution. Please follow the instructions on [how to deploy Azure Monitor - container health solution to your Log Analytics workspace. 
](https://github.com/microsoft/Docker-Provider/blob/ci_prod/scripts/onboarding/solution-onboarding.md) | | Failed to `Enable fast alerting experience on basic metrics for this Azure Kubernetes Services cluster` | The action is trying to grant the Monitoring Metrics Publisher role assignment on the cluster resource. The user initiating the process must have access to the **Microsoft.Authorization/roleAssignments/write** permission on the AKS cluster resource scope. Only members of the **Owner** and **User Access Administrator** built-in roles are granted access to this permission. If your security policies require assigning granular level permissions, we recommend you view [custom roles](https://docs.microsoft.com/en-us/azure/role-based-access-control/custom-roles) and assign it to the users who require it. | # Azure Red Hat OpenShift Service (ARO) @@ -36,7 +36,7 @@ Prequisites: # AKS or ARO -You can use the troubleshooting script provided [here](https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature_prod/Troubleshoot/TroubleshootError.ps1) to diagnose the problem. +You can use the troubleshooting script provided [here](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError.ps1) to diagnose the problem. Steps: - Open powershell using the [cloudshell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview) in the azure portal. 
@@ -45,8 +45,8 @@ Steps: For Mac OS, refer [install-powershell-core-on-mac](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-macos?view=powershell-6) how to install powershell - Make sure that you're using powershell (selected by default) - Run the following command to change home directory - `cd ~` -- Run the following command to download the script - `curl -LO https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature/Troubleshoot/TroubleshootError.ps1` - > Note: In some versions of Powershell above CURL command may not work in such cases, you can try `curl https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature/Troubleshoot/TroubleshootError.ps1 -O TroubleshootError.ps1` +- Run the following command to download the script - `curl -LO https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError.ps1` + > Note: In some versions of Powershell above CURL command may not work in such cases, you can try `curl https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError.ps1 -O TroubleshootError.ps1` - Run the following command to execute the script - `./TroubleshootError.ps1 -ClusterResourceId ` > Note: For AKS, resourceIdoftheCluster should be in this format `/subscriptions//resourceGroups//providers/Microsoft.ContainerService/managedClusters/`.For ARO, should be in this format `/subscriptions//resourceGroups//providers/Microsoft.ContainerService/openShiftManagedClusters/`. - This script will generate a TroubleshootDump.txt which collects detailed information about container health onboarding. @@ -54,10 +54,10 @@ Steps: # Aks-Engine Kubernetes -You can use the troubleshooting script provided [here](https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature_prod/Troubleshoot/TroubleshootError_AcsEngine.ps1) to diagnose the problem. 
+You can use the troubleshooting script provided [here](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError_AcsEngine.ps1) to diagnose the problem. Steps: -- Download [TroubleshootError_AcsEngine.ps1](https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature_prod/Troubleshoot/TroubleshootError_AcsEngine.ps1), [ContainerInsightsSolution.json](https://raw.githubusercontent.com/microsoft/OMS-docker/ci_feature_prod/Troubleshoot/ContainerInsightsSolution.json) +- Download [TroubleshootError_AcsEngine.ps1](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/TroubleshootError_AcsEngine.ps1), [ContainerInsightsSolution.json](https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/troubleshoot/ContainerInsightsSolution.json) - Collect Subscription ID, Resource group name of the Aks-Engine Kubernetes cluster - Use the following command to run the script : `.\TroubleshootError_AcsEngine.ps1 -SubscriptionId -ResourceGroupName `. This script will generate a TroubleshootDump.txt which collects detailed information about container health onboarding. 
diff --git a/scripts/troubleshoot/TroubleshootError.ps1 b/scripts/troubleshoot/TroubleshootError.ps1 index 7f857caa3..4c2d95ac6 100644 --- a/scripts/troubleshoot/TroubleshootError.ps1 +++ b/scripts/troubleshoot/TroubleshootError.ps1 @@ -234,7 +234,17 @@ $MdmCustomMetricAvailabilityLocations = ( 'eastasia', 'centralindia', 'uksouth', - 'canadacentral' + 'canadacentral', + 'francecentral', + 'japaneast', + 'australiaeast', + 'eastus2', + 'westus', + 'australiasoutheast', + 'brazilsouth', + 'germanywestcentral', + 'northcentralus', + 'switzerlandnorth' ); try { @@ -671,7 +681,7 @@ else { try { New-AzResourceGroupDeployment -Name $DeploymentName ` -ResourceGroupName $workspaceResourceGroupName ` - -TemplateUri https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/docs/templates/azuremonitor-containerSolution.json ` + -TemplateUri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json ` -TemplateParameterObject $Parameters -ErrorAction Stop` Write-Host("") diff --git a/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 b/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 index c7509a940..14b080b23 100644 --- a/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 +++ b/scripts/troubleshoot/TroubleshootError_nonAzureK8s.ps1 @@ -345,7 +345,7 @@ else { try { New-AzResourceGroupDeployment -Name $DeploymentName ` -ResourceGroupName $defaultWorkspaceResourceGroup ` - -TemplateUri https://raw.githubusercontent.com/Microsoft/OMS-docker/ci_feature/docs/templates/azuremonitor-containerSolution.json ` + -TemplateUri https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_prod/scripts/onboarding/templates/azuremonitor-containerSolution.json ` -TemplateParameterObject $Parameters -ErrorAction Stop` Write-Host("") diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 42ecfcaf0..9e0935480 100644 --- 
a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -20,6 +20,7 @@ class CAdvisorMetricsAPIClient @clusterEnvVarCollectionEnabled = ENV["AZMON_CLUSTER_COLLECT_ENV_VAR"] @clusterStdErrLogCollectionEnabled = ENV["AZMON_COLLECT_STDERR_LOGS"] @clusterStdOutLogCollectionEnabled = ENV["AZMON_COLLECT_STDOUT_LOGS"] + @pvKubeSystemCollectionMetricsEnabled = ENV["AZMON_PV_COLLECT_KUBE_SYSTEM_METRICS"] @clusterLogTailExcludPath = ENV["AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH"] @clusterLogTailPath = ENV["AZMON_LOG_TAIL_PATH"] @clusterAgentSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] @@ -33,6 +34,8 @@ class CAdvisorMetricsAPIClient @cAdvisorMetricsSecurePort = ENV["IS_SECURE_CADVISOR_PORT"] @containerLogsRoute = ENV["AZMON_CONTAINER_LOGS_ROUTE"] @hmEnabled = ENV["AZMON_CLUSTER_ENABLE_HEALTH_MODEL"] + @npmIntegrationBasic = ENV["TELEMETRY_NPM_INTEGRATION_METRICS_BASIC"] + @npmIntegrationAdvanced = ENV["TELEMETRY_NPM_INTEGRATION_METRICS_ADVANCED"] @LogPath = "/var/opt/microsoft/docker-cimprov/log/kubernetes_perf_log.txt" @Log = Logger.new(@LogPath, 2, 10 * 1048576) #keep last 2 files, max log file size = 10M @@ -51,6 +54,7 @@ class CAdvisorMetricsAPIClient @@winNodePrevMetricRate = {} @@telemetryCpuMetricTimeTracker = DateTime.now.to_time.to_i @@telemetryMemoryMetricTimeTracker = DateTime.now.to_time.to_i + @@telemetryPVKubeSystemMetricsTimeTracker = DateTime.now.to_time.to_i #Containers a hash of node name and the last time telemetry was sent for this node @@nodeTelemetryTimeTracker = {} @@ -250,7 +254,13 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met #telemetry about health model if (!@hmEnabled.nil? && !@hmEnabled.empty?) telemetryProps["hmEnabled"] = @hmEnabled - end + end + #telemetry for npm integration + if (!@npmIntegrationAdvanced.nil? && !@npmIntegrationAdvanced.empty?) + telemetryProps["int-npm-a"] = "1" + elsif (!@npmIntegrationBasic.nil? && !@npmIntegrationBasic.empty?) 
+ telemetryProps["int-npm-b"] = "1" + end ApplicationInsightsUtility.sendMetricTelemetry(metricNametoReturn, metricValue, telemetryProps) end end @@ -293,6 +303,8 @@ def getInsightsMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime)) metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed","containerGpumemoryUsedBytes", metricTime)) metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle","containerGpuDutyCycle", metricTime)) + + metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime)) else @Log.warn("Couldn't get Insights metrics information for host: #{hostName} os:#{operatingSystem}") end @@ -303,6 +315,80 @@ def getInsightsMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) return metricDataItems end + def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metricNameToReturn, metricPollTime) + telemetryTimeDifference = (DateTime.now.to_time.to_i - @@telemetryPVKubeSystemMetricsTimeTracker).abs + telemetryTimeDifferenceInMinutes = telemetryTimeDifference / 60 + + metricItems = [] + clusterId = KubernetesApiClient.getClusterId + clusterName = KubernetesApiClient.getClusterName + begin + metricInfo = metricJSON + metricInfo["pods"].each do |pod| + + podNamespace = pod["podRef"]["namespace"] + excludeNamespace = false + if (podNamespace.downcase == "kube-system") && @pvKubeSystemCollectionMetricsEnabled == "false" + excludeNamespace = true + end + + if (!excludeNamespace && !pod["volume"].nil?) + pod["volume"].each do |volume| + if (!volume["pvcRef"].nil?) + pvcRef = volume["pvcRef"] + if (!pvcRef["name"].nil?) 
+ + # A PVC exists on this volume + podUid = pod["podRef"]["uid"] + podName = pod["podRef"]["name"] + pvcName = pvcRef["name"] + pvcNamespace = pvcRef["namespace"] + + metricItem = {} + metricItem["CollectionTime"] = metricPollTime + metricItem["Computer"] = hostName + metricItem["Name"] = metricNameToReturn + metricItem["Value"] = volume[metricNameToCollect] + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Namespace"] = Constants::INSIGTHTSMETRICS_TAGS_PV_NAMESPACE + + metricTags = {} + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID ] = clusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName + metricTags[Constants::INSIGHTSMETRICS_TAGS_POD_UID] = podUid + metricTags[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = podName + metricTags[Constants::INSIGHTSMETRICS_TAGS_PVC_NAME] = pvcName + metricTags[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] = pvcNamespace + metricTags[Constants::INSIGHTSMETRICS_TAGS_VOLUME_NAME] = volume["name"] + metricTags[Constants::INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES] = volume["capacityBytes"] + + metricItem["Tags"] = metricTags + + metricItems.push(metricItem) + end + end + end + end + end + rescue => errorStr + @Log.warn("getPersistentVolumeMetrics failed: #{errorStr} for metric #{metricNameToCollect}") + return metricItems + end + + # If kube-system metrics collection enabled, send telemetry + begin + if telemetryTimeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES && @pvKubeSystemCollectionMetricsEnabled == "true" + ApplicationInsightsUtility.sendCustomEvent(Constants::PV_KUBE_SYSTEM_METRICS_ENABLED_EVENT, {}) + @@telemetryPVKubeSystemMetricsTimeTracker = DateTime.now.to_time.to_i + end + rescue => errorStr + @Log.warn("getPersistentVolumeMetrics kube-system metrics enabled telemetry failed: #{errorStr}") + end + + return metricItems + end + + def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, 
metricPollTime) metricItems = [] clusterId = KubernetesApiClient.getClusterId diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 987d290aa..073eb0417 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -99,7 +99,6 @@ def getResourceUri(resource, api_group) elsif api_group == @@ApiGroupHPA return "https://#{ENV["KUBERNETES_SERVICE_HOST"]}:#{ENV["KUBERNETES_PORT_443_TCP_PORT"]}/apis/" + @@ApiGroupHPA + "/" + @@ApiVersionHPA + "/" + resource end - else @Log.warn ("Kubernetes environment variable not set KUBERNETES_SERVICE_HOST: #{ENV["KUBERNETES_SERVICE_HOST"]} KUBERNETES_PORT_443_TCP_PORT: #{ENV["KUBERNETES_PORT_443_TCP_PORT"]}. Unable to form resourceUri") return nil @@ -720,6 +719,9 @@ def getMetricNumericValue(metricName, metricVal) if (metricValue.end_with?("m")) metricValue.chomp!("m") metricValue = Float(metricValue) * 1000.0 ** 2 + elsif (metricValue.end_with?("k")) + metricValue.chomp!("k") + metricValue = Float(metricValue) * 1000.0 else #assuming no units specified, it is cores that we are converting to nanocores (the below conversion will fail for other unsupported 'units') metricValue = Float(metricValue) * 1000.0 ** 3 end @@ -743,7 +745,7 @@ def getResourcesAndContinuationToken(uri, api_group: nil) resourceInventory = nil begin @Log.info "KubernetesApiClient::getResourcesAndContinuationToken : Getting resources from Kube API using url: #{uri} @ #{Time.now.utc.iso8601}" - resourceInfo = getKubeResourceInfo(uri, api_group:api_group) + resourceInfo = getKubeResourceInfo(uri, api_group: api_group) @Log.info "KubernetesApiClient::getResourcesAndContinuationToken : Done getting resources from Kube API using url: #{uri} @ #{Time.now.utc.iso8601}" if !resourceInfo.nil? 
@Log.info "KubernetesApiClient::getResourcesAndContinuationToken:Start:Parsing data for #{uri} using yajl @ #{Time.now.utc.iso8601}" @@ -761,5 +763,19 @@ def getResourcesAndContinuationToken(uri, api_group: nil) end return continuationToken, resourceInventory end #getResourcesAndContinuationToken + + def getKubeAPIServerUrl + apiServerUrl = nil + begin + if ENV["KUBERNETES_SERVICE_HOST"] && ENV["KUBERNETES_PORT_443_TCP_PORT"] + apiServerUrl = "https://#{ENV["KUBERNETES_SERVICE_HOST"]}:#{ENV["KUBERNETES_PORT_443_TCP_PORT"]}" + else + @Log.warn "Kubernetes environment variable not set KUBERNETES_SERVICE_HOST: #{ENV["KUBERNETES_SERVICE_HOST"]} KUBERNETES_PORT_443_TCP_PORT: #{ENV["KUBERNETES_PORT_443_TCP_PORT"]}. Unable to form resourceUri" + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getKubeAPIServerUrl:Failed #{errorStr}" + end + return apiServerUrl + end end end diff --git a/source/plugins/ruby/MdmAlertTemplates.rb b/source/plugins/ruby/MdmAlertTemplates.rb index 2e516a99d..ef63cf219 100644 --- a/source/plugins/ruby/MdmAlertTemplates.rb +++ b/source/plugins/ruby/MdmAlertTemplates.rb @@ -90,6 +90,40 @@ class MdmAlertTemplates } }' + PV_resource_utilization_template = ' + { + "time": "%{timestamp}", + "data": { + "baseData": { + "metric": "%{metricName}", + "namespace": "insights.container/persistentvolumes", + "dimNames": [ + "podName", + "node", + "kubernetesNamespace", + "volumeName", + "thresholdPercentage" + ], + "series": [ + { + "dimValues": [ + "%{podNameDimValue}", + "%{computerNameDimValue}", + "%{namespaceDimValue}", + "%{volumeNameDimValue}", + "%{thresholdPercentageDimValue}" + ], + "min": %{pvResourceUtilizationPercentage}, + "max": %{pvResourceUtilizationPercentage}, + "sum": %{pvResourceUtilizationPercentage}, + "count": 1 + } + ] + } + } + }' + + Node_resource_metrics_template = ' { "time": "%{timestamp}", diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index 3d75dc6f4..12d462e44 
100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -8,9 +8,11 @@ class MdmMetricsGenerator require_relative "MdmAlertTemplates" require_relative "ApplicationInsightsUtility" require_relative "constants" + require_relative "oms_common" @log_path = "/var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log" @log = Logger.new(@log_path, 1, 5000000) + @@hostName = (OMS::Common.get_hostname) @oom_killed_container_count_hash = {} @container_restart_count_hash = {} @@ -37,8 +39,13 @@ class MdmMetricsGenerator Constants::MEMORY_WORKING_SET_BYTES => Constants::MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC, } + @@pod_metric_name_metric_percentage_name_hash = { + Constants::PV_USED_BYTES => Constants::MDM_PV_UTILIZATION_METRIC, + } + # Setting this to true since we need to send zero filled metrics at startup. If metrics are absent alert creation fails @sendZeroFilledMetrics = true + @zeroFilledMetricsTimeTracker = DateTime.now.to_time.to_i def initialize end @@ -175,6 +182,20 @@ def zeroFillMetricRecords(records, batch_time) if !containerMemoryWorkingSetRecord.nil? && !containerMemoryWorkingSetRecord.empty? && !containerMemoryWorkingSetRecord[0].nil? && !containerMemoryWorkingSetRecord[0].empty? records.push(containerMemoryWorkingSetRecord[0]) end + + pvZeroFillDims = {} + pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] = Constants::KUBESYSTEM_NAMESPACE_ZERO_FILL + pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = Constants::OMSAGENT_ZERO_FILL + pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_VOLUME_NAME] = Constants::VOLUME_NAME_ZERO_FILL + pvResourceUtilMetricRecord = getPVResourceUtilMetricRecords(batch_time, + Constants::PV_USED_BYTES, + @@hostName, + 0, + pvZeroFillDims, + metric_threshold_hash[Constants::PV_USED_BYTES]) + if !pvResourceUtilMetricRecord.nil? && !pvResourceUtilMetricRecord.empty? && !pvResourceUtilMetricRecord[0].nil? && !pvResourceUtilMetricRecord[0].empty? 
+ records.push(pvResourceUtilMetricRecord[0]) + end rescue => errorStr @log.info "Error in zeroFillMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) @@ -185,10 +206,13 @@ def zeroFillMetricRecords(records, batch_time) def appendAllPodMetrics(records, batch_time) begin @log.info "in appendAllPodMetrics..." - if @sendZeroFilledMetrics == true + timeDifference = (DateTime.now.to_time.to_i - @zeroFilledMetricsTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if @sendZeroFilledMetrics == true || (timeDifferenceInMinutes >= Constants::ZERO_FILL_METRICS_INTERVAL_IN_MINUTES) records = zeroFillMetricRecords(records, batch_time) # Setting it to false after startup @sendZeroFilledMetrics = false + @zeroFilledMetricsTimeTracker = DateTime.now.to_time.to_i end records = appendPodMetrics(records, Constants::MDM_OOM_KILLED_CONTAINER_COUNT, @@ -259,6 +283,33 @@ def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentag return records end + def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percentageMetricValue, dims, thresholdPercentage) + records = [] + begin + containerName = dims[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] + pvcNamespace = dims[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] + podName = dims[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] + podUid = dims[Constants::INSIGHTSMETRICS_TAGS_POD_UID] + volumeName = dims[Constants::INSIGHTSMETRICS_TAGS_VOLUME_NAME] + + resourceUtilRecord = MdmAlertTemplates::PV_resource_utilization_template % { + timestamp: recordTimeStamp, + metricName: @@pod_metric_name_metric_percentage_name_hash[metricName], + podNameDimValue: podName, + computerNameDimValue: computer, + namespaceDimValue: pvcNamespace, + volumeNameDimValue: volumeName, + pvResourceUtilizationPercentage: percentageMetricValue, + thresholdPercentageDimValue: thresholdPercentage, + } + records.push(Yajl::Parser.parse(StringIO.new(resourceUtilRecord))) + rescue => 
errorStr + @log.info "Error in getPVResourceUtilMetricRecords: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + return records + end + def getDiskUsageMetricRecords(record) records = [] usedPercent = nil @@ -296,22 +347,22 @@ def getMetricRecords(record) begin dimNames = String.new "" #mutable string dimValues = String.new "" - noDimVal ="-" + noDimVal = "-" metricValue = 0 if !record["tags"].nil? - dimCount = 0 - record["tags"].each { |k, v| - dimCount = dimCount+1 - if (dimCount <= 10) #MDM = 10 dims - dimNames.concat("\"#{k}\"") - dimNames.concat(",") - if !v.nil? && v.length >0 - dimValues.concat("\"#{v}\"") - else - dimValues.concat("\"#{noDimVal}\"") - end - dimValues.concat(",") + dimCount = 0 + record["tags"].each { |k, v| + dimCount = dimCount + 1 + if (dimCount <= 10) #MDM = 10 dims + dimNames.concat("\"#{k}\"") + dimNames.concat(",") + if !v.nil? && v.length > 0 + dimValues.concat("\"#{v}\"") + else + dimValues.concat("\"#{noDimVal}\"") end + dimValues.concat(",") + end } if (dimNames.end_with?(",")) dimNames.chomp!(",") @@ -324,19 +375,19 @@ def getMetricRecords(record) convertedTimestamp = Time.at(timestamp.to_i).utc.iso8601 if !record["fields"].nil? 
record["fields"].each { |k, v| - if is_numeric(v) - metricRecord = MdmAlertTemplates::Generic_metric_template % { - timestamp: convertedTimestamp, - metricName: k, - namespaceSuffix: record["name"], - dimNames: dimNames, - dimValues: dimValues, - metricValue: v, - } - records.push(Yajl::Parser.parse(StringIO.new(metricRecord))) - #@log.info "pushed mdmgenericmetric: #{k},#{v}" - end - } + if is_numeric(v) + metricRecord = MdmAlertTemplates::Generic_metric_template % { + timestamp: convertedTimestamp, + metricName: k, + namespaceSuffix: record["name"], + dimNames: dimNames, + dimValues: dimValues, + metricValue: v, + } + records.push(Yajl::Parser.parse(StringIO.new(metricRecord))) + #@log.info "pushed mdmgenericmetric: #{k},#{v}" + end + } end rescue => errorStr @log.info "getMetricRecords:Error: #{errorStr} for record #{record}" @@ -346,7 +397,7 @@ def getMetricRecords(record) end def is_numeric(o) - true if Float(o) rescue false + true if Float(o) rescue false end def getContainerResourceUtilizationThresholds @@ -356,6 +407,7 @@ def getContainerResourceUtilizationThresholds metric_threshold_hash[Constants::CPU_USAGE_NANO_CORES] = Constants::DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD metric_threshold_hash[Constants::MEMORY_RSS_BYTES] = Constants::DEFAULT_MDM_MEMORY_RSS_THRESHOLD metric_threshold_hash[Constants::MEMORY_WORKING_SET_BYTES] = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD + metric_threshold_hash[Constants::PV_USED_BYTES] = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD cpuThreshold = ENV["AZMON_ALERT_CONTAINER_CPU_THRESHOLD"] if !cpuThreshold.nil? && !cpuThreshold.empty? @@ -375,6 +427,12 @@ def getContainerResourceUtilizationThresholds memoryWorkingSetThresholdFloat = (memoryWorkingSetThreshold.to_f).round(2) metric_threshold_hash[Constants::MEMORY_WORKING_SET_BYTES] = memoryWorkingSetThresholdFloat end + + pvUsagePercentageThreshold = ENV["AZMON_ALERT_PV_USAGE_THRESHOLD"] + if !pvUsagePercentageThreshold.nil? && !pvUsagePercentageThreshold.empty? 
+ pvUsagePercentageThresholdFloat = (pvUsagePercentageThreshold.to_f).round(2) + metric_threshold_hash[Constants::PV_USED_BYTES] = pvUsagePercentageThresholdFloat + end rescue => errorStr @log.info "Error in getContainerResourceUtilizationThresholds: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) diff --git a/source/plugins/ruby/arc_k8s_cluster_identity.rb b/source/plugins/ruby/arc_k8s_cluster_identity.rb new file mode 100644 index 000000000..ef55c3257 --- /dev/null +++ b/source/plugins/ruby/arc_k8s_cluster_identity.rb @@ -0,0 +1,216 @@ +# frozen_string_literal: true +require "logger" +require "net/http" +require "net/https" +require "uri" +require "yajl/json_gem" +require "base64" +require "time" +require_relative "KubernetesApiClient" +require_relative "ApplicationInsightsUtility" + +class ArcK8sClusterIdentity + # this arc k8s crd version and arc k8s uses corresponding version v1beta1 vs v1 based on the k8s version for apiextensions.k8s.io + @@cluster_config_crd_api_version = "clusterconfig.azure.com/v1beta1" + @@cluster_identity_resource_name = "container-insights-clusteridentityrequest" + @@cluster_identity_resource_namespace = "azure-arc" + @@cluster_identity_token_secret_namespace = "azure-arc" + @@crd_resource_uri_template = "%{kube_api_server_url}/apis/%{cluster_config_crd_api_version}/namespaces/%{cluster_identity_resource_namespace}/azureclusteridentityrequests/%{cluster_identity_resource_name}" + @@secret_resource_uri_template = "%{kube_api_server_url}/api/v1/namespaces/%{cluster_identity_token_secret_namespace}/secrets/%{token_secret_name}" + @@azure_monitor_custom_metrics_audience = "https://monitoring.azure.com/" + @@cluster_identity_request_kind = "AzureClusterIdentityRequest" + + def initialize + @LogPath = "/var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log" + @log = Logger.new(@LogPath, 1, 5000000) + @log.info "initialize start @ #{Time.now.utc.iso8601}" + @token_expiry_time = Time.now + 
@cached_access_token = String.new + @token_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/token" + @cert_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + @kube_api_server_url = KubernetesApiClient.getKubeAPIServerUrl + if @kube_api_server_url.nil? + @log.warn "got api server url nil from KubernetesApiClient.getKubeAPIServerUrl @ #{Time.now.utc.iso8601}" + end + @http_client = get_http_client + @service_account_token = get_service_account_token + @log.info "initialize complete @ #{Time.now.utc.iso8601}" + end + + def get_cluster_identity_token() + begin + # get the cluster msi identity token either if its empty or near expirty. Token is valid 24 hrs. + if @cached_access_token.to_s.empty? || (Time.now + 60 * 60 > @token_expiry_time) # Refresh token 1 hr from expiration + # renew the token if its near expiry + if !@cached_access_token.to_s.empty? && (Time.now + 60 * 60 > @token_expiry_time) + @log.info "renewing the token since its near expiry @ #{Time.now.utc.iso8601}" + renew_near_expiry_token + # sleep 60 seconds to get the renewed token available + sleep 60 + end + @log.info "get token reference from crd @ #{Time.now.utc.iso8601}" + tokenReference = get_token_reference_from_crd + if !tokenReference.nil? && !tokenReference.empty? + @token_expiry_time = Time.parse(tokenReference["expirationTime"]) + token_secret_name = tokenReference["secretName"] + token_secret_data_name = tokenReference["dataName"] + # get the token from secret + @log.info "get token from secret @ #{Time.now.utc.iso8601}" + token = get_token_from_secret(token_secret_name, token_secret_data_name) + if !token.nil? 
+ @cached_access_token = token + else + @log.warn "got token nil from secret: #{@token_secret_name}" + end + else + @log.warn "got token reference either nil or empty" + end + end + rescue => err + @log.warn "get_cluster_identity_token failed: #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + return @cached_access_token + end + + private + + def get_token_from_secret(token_secret_name, token_secret_data_name) + token = nil + begin + secret_request_uri = @@secret_resource_uri_template % { + kube_api_server_url: @kube_api_server_url, + cluster_identity_token_secret_namespace: @@cluster_identity_token_secret_namespace, + token_secret_name: token_secret_name, + } + get_request = Net::HTTP::Get.new(secret_request_uri) + get_request["Authorization"] = "Bearer #{@service_account_token}" + @log.info "Making GET request to #{secret_request_uri} @ #{Time.now.utc.iso8601}" + get_response = @http_client.request(get_request) + @log.info "Got response of #{get_response.code} for #{secret_request_uri} @ #{Time.now.utc.iso8601}" + if get_response.code.to_i == 200 + token_secret = JSON.parse(get_response.body)["data"] + cluster_identity_token = token_secret[token_secret_data_name] + token = Base64.decode64(cluster_identity_token) + end + rescue => err + @log.warn "get_token_from_secret API call failed: #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + return token + end + + private + + def get_token_reference_from_crd() + tokenReference = {} + begin + crd_request_uri = @@crd_resource_uri_template % { + kube_api_server_url: @kube_api_server_url, + cluster_config_crd_api_version: @@cluster_config_crd_api_version, + cluster_identity_resource_namespace: @@cluster_identity_resource_namespace, + cluster_identity_resource_name: @@cluster_identity_resource_name, + } + get_request = Net::HTTP::Get.new(crd_request_uri) + get_request["Authorization"] = "Bearer #{@service_account_token}" + 
@log.info "Making GET request to #{crd_request_uri} @ #{Time.now.utc.iso8601}" + get_response = @http_client.request(get_request) + @log.info "Got response of #{get_response.code} for #{crd_request_uri} @ #{Time.now.utc.iso8601}" + if get_response.code.to_i == 200 + status = JSON.parse(get_response.body)["status"] + tokenReference["expirationTime"] = status["expirationTime"] + tokenReference["secretName"] = status["tokenReference"]["secretName"] + tokenReference["dataName"] = status["tokenReference"]["dataName"] + end + rescue => err + @log.warn "get_token_reference_from_crd call failed: #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + return tokenReference + end + + private + + def renew_near_expiry_token() + begin + crd_request_uri = @@crd_resource_uri_template % { + kube_api_server_url: @kube_api_server_url, + cluster_config_crd_api_version: @@cluster_config_crd_api_version, + cluster_identity_resource_namespace: @@cluster_identity_resource_namespace, + cluster_identity_resource_name: @@cluster_identity_resource_name, + } + crd_request_body = get_crd_request_body + crd_request_body_json = crd_request_body.to_json + update_request = Net::HTTP::Patch.new(crd_request_uri) + update_request["Content-Type"] = "application/merge-patch+json" + update_request["Authorization"] = "Bearer #{@service_account_token}" + update_request.body = crd_request_body_json + update_response = @http_client.request(update_request) + @log.info "Got response of #{update_response.code} for PATCH #{crd_request_uri} @ #{Time.now.utc.iso8601}" + if update_response.code.to_i == 404 + @log.info "since crd resource doesnt exist since creating crd resource : #{@@cluster_identity_resource_name} @ #{Time.now.utc.iso8601}" + create_request = Net::HTTP::Post.new(crd_request_uri) + create_request["Content-Type"] = "application/json" + create_request["Authorization"] = "Bearer #{@service_account_token}" + create_request.body = crd_request_body_json + 
create_response = @http_client.request(create_request) + @log.info "Got response of #{create_response.code} for POST #{crd_request_uri} @ #{Time.now.utc.iso8601}" + end + rescue => err + @log.warn "renew_near_expiry_token call failed: #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + end + + private + + def get_service_account_token() + begin + if File.exist?(@token_file_path) && File.readable?(@token_file_path) + token_str = File.read(@token_file_path).strip + return token_str + else + @log.warn "Unable to read token string from #{@token_file_path}" + return nil + end + rescue => err + @log.warn "get_service_account_token call failed: #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + end + + private + + def get_http_client() + begin + base_api_server_url = URI.parse(@kube_api_server_url) + http = Net::HTTP.new(base_api_server_url.host, base_api_server_url.port) + http.use_ssl = true + if !File.exist?(@cert_file_path) + raise "#{@cert_file_path} doesnt exist" + else + http.ca_file = @cert_file_path + end + http.verify_mode = OpenSSL::SSL::VERIFY_PEER + return http + rescue => err + @log.warn "Unable to create http client #{err}" + ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) + end + return nil + end + + private + + def get_crd_request_body + body = {} + body["apiVersion"] = @@cluster_config_crd_api_version + body["kind"] = @@cluster_identity_request_kind + body["metadata"] = {} + body["metadata"]["name"] = @@cluster_identity_resource_name + body["metadata"]["namespace"] = @@cluster_identity_resource_namespace + body["spec"] = {} + body["spec"]["audience"] = @@azure_monitor_custom_metrics_audience + return body + end +end diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index dd1ba24b3..35e5f9334 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -13,6 +13,13 
@@ class Constants INSIGHTSMETRICS_TAGS_K8SNAMESPACE = "k8sNamespace" INSIGHTSMETRICS_TAGS_CONTROLLER_NAME = "controllerName" INSIGHTSMETRICS_TAGS_CONTROLLER_KIND = "controllerKind" + INSIGHTSMETRICS_TAGS_POD_UID = "podUid" + INSIGTHTSMETRICS_TAGS_PV_NAMESPACE = "container.azm.ms/pv" + INSIGHTSMETRICS_TAGS_PVC_NAME = "pvcName" + INSIGHTSMETRICS_TAGS_PVC_NAMESPACE = "pvcNamespace" + INSIGHTSMETRICS_TAGS_POD_NAME = "podName" + INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES = "pvCapacityBytes" + INSIGHTSMETRICS_TAGS_VOLUME_NAME = "volumeName" INSIGHTSMETRICS_FLUENT_TAG = "oms.api.InsightsMetrics" REASON_OOM_KILLED = "oomkilled" #Kubestate (common) @@ -45,33 +52,43 @@ class Constants MDM_CONTAINER_CPU_UTILIZATION_METRIC = "cpuExceededPercentage" MDM_CONTAINER_MEMORY_RSS_UTILIZATION_METRIC = "memoryRssExceededPercentage" MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC = "memoryWorkingSetExceededPercentage" + MDM_PV_UTILIZATION_METRIC = "pvUsageExceededPercentage" MDM_NODE_CPU_USAGE_PERCENTAGE = "cpuUsagePercentage" MDM_NODE_MEMORY_RSS_PERCENTAGE = "memoryRssPercentage" MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE = "memoryWorkingSetPercentage" - CONTAINER_TERMINATED_RECENTLY_IN_MINUTES = 5 - OBJECT_NAME_K8S_CONTAINER = "K8SContainer" - OBJECT_NAME_K8S_NODE = "K8SNode" - CPU_USAGE_NANO_CORES = "cpuUsageNanoCores" - CPU_USAGE_MILLI_CORES = "cpuUsageMillicores" - MEMORY_WORKING_SET_BYTES= "memoryWorkingSetBytes" - MEMORY_RSS_BYTES = "memoryRssBytes" - DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD = 95.0 - DEFAULT_MDM_MEMORY_RSS_THRESHOLD = 95.0 - DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD = 95.0 - CONTROLLER_KIND_JOB = "job" - CONTAINER_TERMINATION_REASON_COMPLETED = "completed" - CONTAINER_STATE_TERMINATED = "terminated" - STALE_JOB_TIME_IN_MINUTES = 360 - TELEGRAF_DISK_METRICS = "container.azm.ms/disk" - OMSAGENT_ZERO_FILL = "omsagent" - KUBESYSTEM_NAMESPACE_ZERO_FILL = "kube-system" + CONTAINER_TERMINATED_RECENTLY_IN_MINUTES = 5 + OBJECT_NAME_K8S_CONTAINER = "K8SContainer" + 
OBJECT_NAME_K8S_NODE = "K8SNode" + CPU_USAGE_NANO_CORES = "cpuUsageNanoCores" + CPU_USAGE_MILLI_CORES = "cpuUsageMillicores" + MEMORY_WORKING_SET_BYTES = "memoryWorkingSetBytes" + MEMORY_RSS_BYTES = "memoryRssBytes" + PV_USED_BYTES = "pvUsedBytes" + DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD = 95.0 + DEFAULT_MDM_MEMORY_RSS_THRESHOLD = 95.0 + DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD = 95.0 + DEFAULT_MDM_PV_UTILIZATION_THRESHOLD = 60.0 + CONTROLLER_KIND_JOB = "job" + CONTAINER_TERMINATION_REASON_COMPLETED = "completed" + CONTAINER_STATE_TERMINATED = "terminated" + STALE_JOB_TIME_IN_MINUTES = 360 + TELEGRAF_DISK_METRICS = "container.azm.ms/disk" + OMSAGENT_ZERO_FILL = "omsagent" + KUBESYSTEM_NAMESPACE_ZERO_FILL = "kube-system" + VOLUME_NAME_ZERO_FILL = "-" - #Telemetry constants - CONTAINER_METRICS_HEART_BEAT_EVENT = "ContainerMetricsMdmHeartBeatEvent" - POD_READY_PERCENTAGE_HEART_BEAT_EVENT = "PodReadyPercentageMdmHeartBeatEvent" - CONTAINER_RESOURCE_UTIL_HEART_BEAT_EVENT = "ContainerResourceUtilMdmHeartBeatEvent" - TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 10 - KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 15 - MDM_TIME_SERIES_FLUSHED_IN_LAST_HOUR = "MdmTimeSeriesFlushedInLastHour" -end \ No newline at end of file + #Telemetry constants + CONTAINER_METRICS_HEART_BEAT_EVENT = "ContainerMetricsMdmHeartBeatEvent" + POD_READY_PERCENTAGE_HEART_BEAT_EVENT = "PodReadyPercentageMdmHeartBeatEvent" + CONTAINER_RESOURCE_UTIL_HEART_BEAT_EVENT = "ContainerResourceUtilMdmHeartBeatEvent" + PV_USAGE_HEART_BEAT_EVENT = "PVUsageMdmHeartBeatEvent" + PV_KUBE_SYSTEM_METRICS_ENABLED_EVENT = "CollectPVKubeSystemMetricsEnabled" + TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 10 + KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 15 + ZERO_FILL_METRICS_INTERVAL_IN_MINUTES = 30 + MDM_TIME_SERIES_FLUSHED_IN_LAST_HOUR = "MdmTimeSeriesFlushedInLastHour" + + #Pod Statuses + POD_STATUS_TERMINATING = "Terminating" +end diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb 
b/source/plugins/ruby/filter_cadvisor2mdm.rb index fd43ef98b..3bc674ea8 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -16,7 +16,7 @@ class CAdvisor2MdmFilter < Filter config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => "/var/opt/microsoft/docker-cimprov/log/filter_cadvisor2mdm.log" config_param :custom_metrics_azure_regions, :string - config_param :metrics_to_collect, :string, :default => "Constants::CPU_USAGE_NANO_CORES,Constants::MEMORY_WORKING_SET_BYTES,Constants::MEMORY_RSS_BYTES" + config_param :metrics_to_collect, :string, :default => "Constants::CPU_USAGE_NANO_CORES,Constants::MEMORY_WORKING_SET_BYTES,Constants::MEMORY_RSS_BYTES,Constants::PV_USED_BYTES" @@hostName = (OMS::Common.get_hostname) @@ -46,11 +46,13 @@ def start @metrics_to_collect_hash = build_metrics_hash @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" @@containerResourceUtilTelemetryTimeTracker = DateTime.now.to_time.to_i + @@pvUsageTelemetryTimeTracker = DateTime.now.to_time.to_i # These variables keep track if any resource utilization threshold exceeded in the last 10 minutes @containersExceededCpuThreshold = false @containersExceededMemRssThreshold = false @containersExceededMemWorkingSetThreshold = false + @pvExceededUsageThreshold = false # initialize cpu and memory limit if @process_incoming_stream @@ -60,6 +62,7 @@ def start @containerCpuLimitHash = {} @containerMemoryLimitHash = {} @containerResourceDimensionHash = {} + @pvUsageHash = {} @@metric_threshold_hash = MdmMetricsGenerator.getContainerResourceUtilizationThresholds end rescue => e @@ -87,6 +90,8 @@ def setThresholdExceededTelemetry(metricName) @containersExceededMemRssThreshold = true elsif metricName == Constants::MEMORY_WORKING_SET_BYTES @containersExceededMemWorkingSetThreshold = true + elsif metricName == Constants::PV_USED_BYTES + @pvExceededUsageThreshold = true 
end rescue => errorStr @log.info "Error in setThresholdExceededTelemetry: #{errorStr}" @@ -109,13 +114,30 @@ def flushMetricTelemetry properties["MemRssThresholdExceededInLastFlushInterval"] = @containersExceededMemRssThreshold properties["MemWSetThresholdExceededInLastFlushInterval"] = @containersExceededMemWorkingSetThreshold ApplicationInsightsUtility.sendCustomEvent(Constants::CONTAINER_RESOURCE_UTIL_HEART_BEAT_EVENT, properties) - @@containerResourceUtilTelemetryTimeTracker = DateTime.now.to_time.to_i @containersExceededCpuThreshold = false @containersExceededMemRssThreshold = false @containersExceededMemWorkingSetThreshold = false + @@containerResourceUtilTelemetryTimeTracker = DateTime.now.to_time.to_i + end + rescue => errorStr + @log.info "Error in flushMetricTelemetry: #{errorStr} for container resource util telemetry" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + + # Also send for PV usage metrics + begin + pvTimeDifference = (DateTime.now.to_time.to_i - @@pvUsageTelemetryTimeTracker).abs + pvTimeDifferenceInMinutes = pvTimeDifference / 60 + if (pvTimeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + pvProperties = {} + pvProperties["PVUsageThresholdPercentage"] = @@metric_threshold_hash[Constants::PV_USED_BYTES] + pvProperties["PVUsageThresholdExceededInLastFlushInterval"] = @pvExceededUsageThreshold + ApplicationInsightsUtility.sendCustomEvent(Constants::PV_USAGE_HEART_BEAT_EVENT, pvProperties) + @pvExceededUsageThreshold = false + @@pvUsageTelemetryTimeTracker = DateTime.now.to_time.to_i end rescue => errorStr - @log.info "Error in flushMetricTelemetry: #{errorStr}" + @log.info "Error in flushMetricTelemetry: #{errorStr} for PV usage telemetry" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) end end @@ -123,6 +145,13 @@ def flushMetricTelemetry def filter(tag, time, record) begin if @process_incoming_stream + + # Check if insights metrics for PV metrics + data_type = record["DataType"] + if 
data_type == "INSIGHTS_METRICS_BLOB" + return filterPVInsightsMetrics(record) + end + object_name = record["DataItems"][0]["ObjectName"] counter_name = record["DataItems"][0]["Collections"][0]["CounterName"] percentage_metric_value = 0.0 @@ -204,6 +233,47 @@ def filter(tag, time, record) end end + def filterPVInsightsMetrics(record) + begin + mdmMetrics = [] + record["DataItems"].each do |dataItem| + + if dataItem["Name"] == Constants::PV_USED_BYTES && @metrics_to_collect_hash.key?(dataItem["Name"].downcase) + metricName = dataItem["Name"] + usage = dataItem["Value"] + capacity = dataItem["Tags"][Constants::INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES] + if capacity != 0 + percentage_metric_value = (usage * 100.0) / capacity + end + @log.info "percentage_metric_value for metric: #{metricName} percentage: #{percentage_metric_value}" + @log.info "@@metric_threshold_hash for #{metricName}: #{@@metric_threshold_hash[metricName]}" + + computer = dataItem["Computer"] + resourceDimensions = dataItem["Tags"] + thresholdPercentage = @@metric_threshold_hash[metricName] + + flushMetricTelemetry + if percentage_metric_value >= thresholdPercentage + setThresholdExceededTelemetry(metricName) + return MdmMetricsGenerator.getPVResourceUtilMetricRecords(dataItem["CollectionTime"], + metricName, + computer, + percentage_metric_value, + resourceDimensions, + thresholdPercentage) + else + return [] + end # end if block for percentage metric > configured threshold % check + end # end if block for dataItem name check + end # end for block of looping through data items + return [] + rescue Exception => e + @log.info "Error processing cadvisor insights metrics record Exception: #{e.class} Message: #{e.message}" + ApplicationInsightsUtility.sendExceptionTelemetry(e.backtrace) + return [] #return empty array if we ran into any errors + end + end + def ensure_cpu_memory_capacity_set if @cpu_capacity != 0.0 && @memory_capacity != 0.0 @log.info "CPU And Memory Capacity are already set" diff --git 
a/source/plugins/ruby/in_cadvisor_perf.rb b/source/plugins/ruby/in_cadvisor_perf.rb index a44365e9d..b706ff00a 100644 --- a/source/plugins/ruby/in_cadvisor_perf.rb +++ b/source/plugins/ruby/in_cadvisor_perf.rb @@ -88,6 +88,7 @@ def enumerate() end router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@mdmtag, insightsMetricsEventStream) if insightsMetricsEventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) $log.info("cAdvisorInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index bffa725ee..4880d80e7 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -194,6 +194,9 @@ def parse_and_emit_records(podInventory, serviceList, continuationToken, batchTi if podReadyCondition == false record["PodStatus"] = "Unknown" + # ICM - https://portal.microsofticm.com/imp/v3/incidents/details/187091803/home + elsif !items["metadata"]["deletionTimestamp"].nil? && !items["metadata"]["deletionTimestamp"].empty? + record["PodStatus"] = Constants::POD_STATUS_TERMINATING else record["PodStatus"] = items["status"]["phase"] end diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 38868f2f5..4e90195e5 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -101,6 +101,7 @@ def enumerate() end router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + router.emit_stream(@mdmtag, insightsMetricsEventStream) if insightsMetricsEventStream if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) $log.info("winCAdvisorInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end diff --git a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb index 33ac49286..bedbae4ee 100644 --- a/source/plugins/ruby/lib/application_insights/channel/sender_base.rb +++ b/source/plugins/ruby/lib/application_insights/channel/sender_base.rb @@ -66,12 +66,12 @@ def send(data_to_send) request.body = compressed_data if @proxy.nil? || @proxy.empty? http = Net::HTTP.new uri.hostname, uri.port - else + else http = Net::HTTP.new(uri.hostname, uri.port, @proxy[:addr], @proxy[:port], @proxy[:user], @proxy[:pass]) end if uri.scheme.downcase == 'https' http.use_ssl = true - http.verify_mode = OpenSSL::SSL::VERIFY_NONE + http.verify_mode = OpenSSL::SSL::VERIFY_PEER end response = http.request(request) diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index d801edb9a..1c805255a 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -16,6 +16,8 @@ def initialize require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" require_relative "constants" + require_relative "arc_k8s_cluster_identity" + require_relative "proxy_utils" @@token_resource_url = "https://monitoring.azure.com/" @@grant_type = "client_credentials" @@ -45,6 +47,8 @@ def initialize @useMsi = false @metrics_flushed_count = 0 + @cluster_identity = nil + @isArcK8sCluster = false @get_access_token_backoff_expiry = Time.now end @@ -57,15 +61,17 @@ def configure(conf) def start super begin - file = File.read(@@azure_json_path) - @data_hash = JSON.parse(file) aks_resource_id = ENV["AKS_RESOURCE_ID"] aks_region = ENV["AKS_REGION"] if aks_resource_id.to_s.empty? @log.info "Environment Variable AKS_RESOURCE_ID is not set.. 
" @can_send_data_to_mdm = false + elsif !aks_resource_id.downcase.include?("/microsoft.containerservice/managedclusters/") && !aks_resource_id.downcase.include?("/microsoft.kubernetes/connectedclusters/") + @log.info "MDM Metris not supported for this cluster type resource: #{aks_resource_id}" + @can_send_data_to_mdm = false end + if aks_region.to_s.empty? @log.info "Environment Variable AKS_REGION is not set.. " @can_send_data_to_mdm = false @@ -76,28 +82,51 @@ def start if @can_send_data_to_mdm @log.info "MDM Metrics supported in #{aks_region} region" + if aks_resource_id.downcase.include?("microsoft.kubernetes/connectedclusters") + @isArcK8sCluster = true + end @@post_request_url = @@post_request_url_template % { aks_region: aks_region, aks_resource_id: aks_resource_id } @post_request_uri = URI.parse(@@post_request_url) - @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port) + if (!!@isArcK8sCluster) + proxy = (ProxyUtils.getProxyConfiguration) + if proxy.nil? || proxy.empty? + @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port) + else + @log.info "Proxy configured on this cluster: #{aks_resource_id}" + @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port, proxy[:addr], proxy[:port], proxy[:user], proxy[:pass]) + end + else + @http_client = Net::HTTP.new(@post_request_uri.host, @post_request_uri.port) + end @http_client.use_ssl = true @log.info "POST Request url: #{@@post_request_url}" ApplicationInsightsUtility.sendCustomEvent("AKSCustomMetricsMDMPluginStart", {}) - # Check to see if SP exists, if it does use SP. Else, use msi - sp_client_id = @data_hash["aadClientId"] - sp_client_secret = @data_hash["aadClientSecret"] - - if (!sp_client_id.nil? && !sp_client_id.empty? 
&& sp_client_id.downcase != "msi") - @useMsi = false - aad_token_url = @@aad_token_url_template % { tenant_id: @data_hash["tenantId"] } - @parsed_token_uri = URI.parse(aad_token_url) + # arc k8s cluster uses cluster identity + if (!!@isArcK8sCluster) + @log.info "using cluster identity token since cluster is azure arc k8s cluster" + @cluster_identity = ArcK8sClusterIdentity.new + @cached_access_token = @cluster_identity.get_cluster_identity_token else - @useMsi = true - msi_endpoint = @@msi_endpoint_template % { user_assigned_client_id: @@user_assigned_client_id, resource: @@token_resource_url } - @parsed_token_uri = URI.parse(msi_endpoint) - end + # azure json file only used for aks and doesnt exist in non-azure envs + file = File.read(@@azure_json_path) + @data_hash = JSON.parse(file) + # Check to see if SP exists, if it does use SP. Else, use msi + sp_client_id = @data_hash["aadClientId"] + sp_client_secret = @data_hash["aadClientSecret"] + + if (!sp_client_id.nil? && !sp_client_id.empty? && sp_client_id.downcase != "msi") + @useMsi = false + aad_token_url = @@aad_token_url_template % { tenant_id: @data_hash["tenantId"] } + @parsed_token_uri = URI.parse(aad_token_url) + else + @useMsi = true + msi_endpoint = @@msi_endpoint_template % { user_assigned_client_id: @@user_assigned_client_id, resource: @@token_resource_url } + @parsed_token_uri = URI.parse(msi_endpoint) + end - @cached_access_token = get_access_token + @cached_access_token = get_access_token + end end rescue => e @log.info "exception when initializing out_mdm #{e}" @@ -226,7 +255,14 @@ def write(chunk) def send_to_mdm(post_body) begin - access_token = get_access_token + if (!!@isArcK8sCluster) + if @cluster_identity.nil? 
+ @cluster_identity = ArcK8sClusterIdentity.new + end + access_token = @cluster_identity.get_cluster_identity_token + else + access_token = get_access_token + end request = Net::HTTP::Post.new(@post_request_uri.request_uri) request["Content-Type"] = "application/x-ndjson" request["Authorization"] = "Bearer #{access_token}" @@ -241,7 +277,7 @@ def send_to_mdm(post_body) @last_telemetry_sent_time = Time.now end rescue Net::HTTPServerException => e - if !response.nil && !response.body.nil? #body will have actual error + if !response.nil? && !response.body.nil? #body will have actual error @log.info "Failed to Post Metrics to MDM : #{e} Response.body: #{response.body}" else @log.info "Failed to Post Metrics to MDM : #{e} Response: #{response}" From f1801321a39fc554c6fdfcc78743a28bef1cab11 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 27 Oct 2020 17:16:20 -0700 Subject: [PATCH 04/18] merge from ci_dev for ciprod10272020 release (#461) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to install docker * fix: Pin to a particular version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * 
Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics (#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod branch * fix links * fix image link * some more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr 
comments * Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. * add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. 
Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback * Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. 
* Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link * chart update for sept2020 release (#457) * add missing version update in the script (#458) * November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart Co-authored-by: Ganga Mahesh Siddem Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner --- Health/onboarding_instructions.md | 24 +++- .../linux/installer/scripts/livenessprobe.sh | 14 +- .../templates/omsagent-daemonset-windows.yaml | 3 +- .../templates/omsagent-daemonset.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +- kubernetes/linux/main.sh | 123 ++++++++++++++---- kubernetes/linux/setup.sh | 4 +- kubernetes/omsagent.yaml | 10 +- .../onboarding/managed/enable-monitoring.ps1 | 2 +- .../onboarding/managed/enable-monitoring.sh | 2 +- .../onboarding/managed/upgrade-monitoring.sh | 2 +- source/plugins/go/src/oms.go | 62 +++++---- source/plugins/go/src/utils.go | 2 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 4 +- source/plugins/ruby/constants.rb | 2 + 15 files changed, 188 insertions(+), 74 deletions(-) diff --git a/Health/onboarding_instructions.md b/Health/onboarding_instructions.md index 9c07b2167..4c83577b5 100644 --- a/Health/onboarding_instructions.md +++ b/Health/onboarding_instructions.md @@ -6,12 +6,28 @@ For on-boarding to Health(Tab), you would need to complete two steps ## Configure agent through ConfigMap -1. Include the following section in ConfigMap yaml file -```cmd:agent-settings: |- - [agent_settings.health_model] +1. 
If you are configuring your existing ConfigMap, append the following section in your existing ConfigMap yaml file +``` +#Append this section in your existing configmap +agent-settings: |- + # agent health model feature settings + [agent_settings.health_model] + # In the absence of this configmap, default value for enabled is false + enabled = true +``` +2. Else if you don't have ConfigMap, download the new ConfigMap from [here.](https://github.com/microsoft/Docker-Provider/blob/ci_prod/kubernetes/container-azm-ms-agentconfig.yaml) & then set `enabled =true` + +``` +#For new downloaded configmap enabled this default setting to true +agent-settings: |- + # agent health model feature settings + [agent_settings.health_model] + # In the absence of this configmap, default value for enabled is false enabled = true ``` -2. Run the following kubectl command: + + +3. Run the following kubectl command: `kubectl apply -f ` Example: `kubectl apply -f container-azm-ms-agentconfig.yaml`. diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 87f68a560..e3f9fb475 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -4,15 +4,25 @@ (ps -ef | grep omsagent- | grep -v "grep") if [ $? -ne 0 ] then - echo "Agent is NOT running" > /dev/termination-log + echo " omsagent is not running" > /dev/termination-log exit 1 fi +#optionally test to exit non zero value if oneagent is not running +if [ -e "/opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2" ]; then + (ps -ef | grep "mdsd -l" | grep -v "grep") + if [ $? -ne 0 ] + then + echo "oneagent is not running" > /dev/termination-log + exit 1 + fi +fi + #test to exit non zero value if fluentbit is not running (ps -ef | grep td-agent-bit | grep -v "grep") if [ $? 
-ne 0 ] then - echo "Fluentbit is NOT running" > /dev/termination-log + echo "Fluentbit is not running" > /dev/termination-log exit 1 fi diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index e65f9a98d..c916fadf6 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -46,7 +46,7 @@ spec: {{- end }} imagePullPolicy: IfNotPresent resources: -{{ toYaml .Values.omsagent.resources.daemonset | indent 9 }} +{{ toYaml .Values.omsagent.resources.daemonset-windows | indent 9 }} env: {{- if ne .Values.omsagent.env.clusterId "" }} - name: AKS_RESOURCE_ID @@ -96,6 +96,7 @@ spec: - C:\opt\omsagentwindows\scripts\cmd\livenessProbe.cmd periodSeconds: 60 initialDelaySeconds: 180 + timeoutSeconds: 15 {{- with .Values.omsagent.tolerations }} tolerations: {{- toYaml . | nindent 8 }} {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 438294ce5..8af13b6ee 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -40,7 +40,7 @@ spec: {{- end }} imagePullPolicy: IfNotPresent resources: -{{ toYaml .Values.omsagent.resources.daemonset | indent 9 }} +{{ toYaml .Values.omsagent.resources.daemonset-linux | indent 9 }} env: {{- if ne .Values.omsagent.env.clusterId "" }} - name: AKS_RESOURCE_ID diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index f841dc5d7..fa01c05bd 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -116,13 +116,17 @@ omsagent: ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: - daemonset: + daemonset-linux: requests: 
cpu: 75m memory: 225Mi limits: cpu: 150m memory: 600Mi + daemonset-windows: + limits: + cpu: 200m + memory: 600Mi deployment: requests: cpu: 150m diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 11972f0f4..b093eb74b 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -416,6 +416,97 @@ echo "DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc +#region check to auto-activate oneagent, to route container logs, +#Intent is to activate one agent routing for all managed clusters with region in the regionllist, unless overridden by configmap +# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map +# AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE will have the final route that we compute & set, based on our region list logic +echo "************start oneagent log routing checks************" +# by default, use configmap route for safer side +AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE + +#trim region list +oneagentregions="$(echo $AZMON_CONTAINERLOGS_ONEAGENT_REGIONS | xargs)" +#lowercase region list +typeset -l oneagentregions=$oneagentregions +echo "oneagent regions: $oneagentregions" +#trim current region +currentregion="$(echo $AKS_REGION | xargs)" +#lowercase current region +typeset -l currentregion=$currentregion +echo "current region: $currentregion" + +#initilze isoneagentregion as false +isoneagentregion=false + +#set isoneagentregion as true if matching region is found +if [ ! -z $oneagentregions ] && [ ! -z $currentregion ]; then + for rgn in $(echo $oneagentregions | sed "s/,/ /g"); do + if [ "$rgn" == "$currentregion" ]; then + isoneagentregion=true + echo "current region is in oneagent regions..." + break + fi + done +else + echo "current region is not in oneagent regions..." 
+fi + +if [ "$isoneagentregion" = true ]; then + #if configmap has a routing for logs, but current region is in the oneagent region list, take the configmap route + if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then + AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE + echo "oneagent region is true for current region:$currentregion and config map logs route is not empty. so using config map logs route as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" + else #there is no configmap route, so route thru oneagent + AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE="v2" + echo "oneagent region is true for current region:$currentregion and config map logs route is empty. so using oneagent as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" + fi +else + echo "oneagent region is false for current region:$currentregion" +fi + + +#start oneagent +if [ ! -e "/etc/config/kube.conf" ]; then + if [ ! -z $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE ]; then + echo "container logs configmap route is $AZMON_CONTAINER_LOGS_ROUTE" + echo "container logs effective route is $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" + #trim + containerlogsroute="$(echo $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE | xargs)" + # convert to lowercase + typeset -l containerlogsroute=$containerlogsroute + + echo "setting AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE as :$containerlogsroute" + export AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$containerlogsroute + echo "export AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$containerlogsroute" >> ~/.bashrc + source ~/.bashrc + + if [ "$containerlogsroute" == "v2" ]; then + echo "activating oneagent..." + echo "configuring mdsd..." 
+ cat /etc/mdsd.d/envmdsd | while read line; do + echo $line >> ~/.bashrc + done + source /etc/mdsd.d/envmdsd + + echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" + export CIWORKSPACE_id=$CIWORKSPACE_id + echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc + export CIWORKSPACE_key=$CIWORKSPACE_key + echo "export CIWORKSPACE_key=$CIWORKSPACE_key" >> ~/.bashrc + + source ~/.bashrc + + dpkg -l | grep mdsd | awk '{print $2 " " $3}' + + echo "starting mdsd ..." + mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + + touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 + fi + fi +fi +echo "************end oneagent log routing checks************" + #telegraf & fluentbit requirements if [ ! -e "/etc/config/kube.conf" ]; then if [ "$CONTAINER_RUNTIME" == "docker" ]; then @@ -491,37 +582,13 @@ dpkg -l | grep td-agent-bit | awk '{print $2 " " $3}' #dpkg -l | grep telegraf | awk '{print $2 " " $3}' -#start oneagent -if [ ! -e "/etc/config/kube.conf" ]; then - if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then - echo "container logs route is defined as $AZMON_CONTAINER_LOGS_ROUTE" - #trim - containerlogsroute="$(echo $AZMON_CONTAINER_LOGS_ROUTE | xargs)" - # convert to lowercase - typeset -l containerlogsroute=$containerlogsroute - if [ "$containerlogsroute" == "v2" ]; then - echo "containerlogsroute $containerlogsroute" - echo "configuring mdsd..." - cat /etc/mdsd.d/envmdsd | while read line; do - echo $line >> ~/.bashrc - done - source /etc/mdsd.d/envmdsd - echo "setting mdsd workspaceid & key for workspace:$CIWORKSPACE_id" - export CIWORKSPACE_id=$CIWORKSPACE_id - echo "export CIWORKSPACE_id=$CIWORKSPACE_id" >> ~/.bashrc - export CIWORKSPACE_key=$CIWORKSPACE_key - echo "export CIWORKSPACE_key=$CIWORKSPACE_key" >> ~/.bashrc - source ~/.bashrc +echo "stopping rsyslog..." +service rsyslog stop - dpkg -l | grep mdsd | awk '{print $2 " " $3}' - - echo "starting mdsd ..." 
- mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & - fi - fi -fi +echo "getting rsyslog status..." +service rsyslog status shutdown() { /opt/microsoft/omsagent/bin/service_control stop diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 67a981dfa..fb41d4782 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -31,8 +31,8 @@ mv $TMPDIR/omsbundle* $TMPDIR/omsbundle /usr/bin/dpkg -i $TMPDIR/omsbundle/110/omsagent*.deb #/usr/bin/dpkg -i $TMPDIR/omsbundle/100/omsconfig*.deb -#install oneagent - Latest dev bits (7/17) -wget https://github.com/microsoft/Docker-Provider/releases/download/7172020-oneagent/azure-mdsd_1.5.124-build.develop.1294_x86_64.deb +#install oneagent - Official bits (10/18) +wget https://github.com/microsoft/Docker-Provider/releases/download/10182020-oneagent/azure-mdsd_1.5.126-build.master.99_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d cp -f $TMPDIR/envmdsd /etc/mdsd.d diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 18bc203d4..61f89b808 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -347,7 +347,7 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - cpu: 150m + cpu: 250m memory: 600Mi requests: cpu: 75m @@ -370,6 +370,8 @@ spec: # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" + - name: AZMON_CONTAINERLOGS_ONEAGENT_REGIONS + value: "koreacentral,norwayeast" securityContext: privileged: true ports: @@ -650,11 +652,8 @@ spec: imagePullPolicy: IfNotPresent resources: limits: - cpu: 150m + cpu: 200m memory: 600Mi - requests: - cpu: 75m - memory: 225Mi env: # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these - name: AKS_RESOURCE_ID @@ -696,6 +695,7 @@ spec: - C:\opt\omsagentwindows\scripts\cmd\livenessProbe.cmd periodSeconds: 60 initialDelaySeconds: 180 + 
timeoutSeconds: 15 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 1e1669400..4815dc958 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -60,7 +60,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.7.6" +$mcrChartVersion = "2.7.7" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index ce62a581a..d7edf49dc 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -42,7 +42,7 @@ set -o pipefail defaultAzureCloud="AzureCloud" # released chart version in mcr -mcrChartVersion="2.7.6" +mcrChartVersion="2.7.7" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." 
diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 8a12b2f02..23594c7bc 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.7.6" +mcrChartVersion="2.7.7" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 63ca6de10..5a678781c 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -194,15 +194,15 @@ type DataItem struct { } type DataItemADX struct { - LogEntry string `json:"LogEntry"` - LogEntrySource string `json:"LogEntrySource"` - LogEntryTimeStamp string `json:"LogEntryTimeStamp"` - LogEntryTimeOfCommand string `json:"TimeOfCommand"` - ID string `json:"Id"` - Image string `json:"Image"` - Name string `json:"Name"` - SourceSystem string `json:"SourceSystem"` + TimeGenerated string `json:"TimeGenerated"` Computer string `json:"Computer"` + ContainerID string `json:"ContainerID"` + ContainerName string `json:"ContainerName"` + PodName string `json:"PodName"` + PodNamespace string `json:"PodNamespace"` + LogMessage string `json:"LogMessage"` + LogSource string `json:"LogSource"` + //PodLabels string `json:"PodLabels"` AzureResourceId string `json:"AzureResourceId"` } @@ -422,7 +422,7 @@ func convert(in interface{}) (float64, bool) { func populateKubeMonAgentEventHash(record map[interface{}]interface{}, errType KubeMonAgentEventType) { var logRecordString = ToString(record["log"]) var eventTimeStamp = ToString(record["time"]) - containerID, _, podName := GetContainerIDK8sNamespacePodNameFromFileName(ToString(record["filepath"])) + containerID, _, podName, _ := GetContainerIDK8sNamespacePodNameFromFileName(ToString(record["filepath"])) Log("Locked 
EventHashUpdateMutex for updating hash \n ") EventHashUpdateMutex.Lock() @@ -816,7 +816,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { DataUpdateMutex.Unlock() for _, record := range tailPluginRecords { - containerID, k8sNamespace, _ := GetContainerIDK8sNamespacePodNameFromFileName(ToString(record["filepath"])) + containerID, k8sNamespace, k8sPodName, containerName := GetContainerIDK8sNamespacePodNameFromFileName(ToString(record["filepath"])) logEntrySource := ToString(record["stream"]) if strings.EqualFold(logEntrySource, "stdout") { @@ -867,16 +867,18 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { if ResourceCentric == true { stringMap["AzureResourceId"] = ResourceID } + stringMap["PodName"] = k8sPodName + stringMap["PodNamespace"] = k8sNamespace + stringMap["ContainerName"] = containerName dataItemADX = DataItemADX{ - ID: stringMap["Id"], - LogEntry: stringMap["LogEntry"], - LogEntrySource: stringMap["LogEntrySource"], - LogEntryTimeStamp: stringMap["LogEntryTimeStamp"], - LogEntryTimeOfCommand: stringMap["TimeOfCommand"], - SourceSystem: stringMap["SourceSystem"], + TimeGenerated: stringMap["LogEntryTimeStamp"], Computer: stringMap["Computer"], - Image: stringMap["Image"], - Name: stringMap["Name"], + ContainerID: stringMap["Id"], + ContainerName: stringMap["ContainerName"], + PodName: stringMap["PodName"], + PodNamespace: stringMap["PodNamespace"], + LogMessage: stringMap["LogEntry"], + LogSource: stringMap["LogEntrySource"], AzureResourceId: stringMap["AzureResourceId"], } //ADX @@ -1018,7 +1020,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { //ADXFlushMutex.Lock() //defer ADXFlushMutex.Unlock() //MultiJSON support is not there yet - if ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogMapping", ingest.JSON), ingest.FileFormat(ingest.JSON), ingest.FlushImmediately()); ingestionErr != nil { + if ingestionErr := 
ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogv2Mapping", ingest.JSON), ingest.FileFormat(ingest.JSON)); ingestionErr != nil { Log("Error when streaming to ADX Ingestion: %s", ingestionErr.Error()) //ADXIngestor = nil //not required as per ADX team. Will keep it to indicate that we tried this approach @@ -1107,12 +1109,13 @@ func containsKey(currentMap map[string]bool, key string) bool { return c } -// GetContainerIDK8sNamespacePodNameFromFileName Gets the container ID, k8s namespace and pod name From the file Name +// GetContainerIDK8sNamespacePodNameFromFileName Gets the container ID, k8s namespace, pod name and containername From the file Name // sample filename kube-proxy-dgcx7_kube-system_kube-proxy-8df7e49e9028b60b5b0d0547f409c455a9567946cf763267b7e6fa053ab8c182.log -func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, string, string) { +func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, string, string, string) { id := "" ns := "" podName := "" + containerName := "" start := strings.LastIndex(filename, "-") end := strings.LastIndex(filename, ".") @@ -1132,6 +1135,15 @@ func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, str ns = filename[start+1 : end] } + start = strings.LastIndex(filename, "_") + end = strings.LastIndex(filename, "-") + + if start >= end || start == -1 || end == -1 { + containerName = "" + } else { + containerName = filename[start+1 : end] + } + start = strings.Index(filename, "/containers/") end = strings.Index(filename, "_") @@ -1141,7 +1153,7 @@ func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, str podName = filename[(start + len("/containers/")):end] } - return id, ns, podName + return id, ns, podName, containerName } // InitializePlugin reads and populates plugin configuration @@ -1313,8 +1325,8 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { CreateHTTPClient() - ContainerLogsRoute := 
strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOGS_ROUTE"))) - Log("AZMON_CONTAINER_LOGS_ROUTE:%s", ContainerLogsRoute) + ContainerLogsRoute := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE"))) + Log("AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE:%s", ContainerLogsRoute) ContainerLogsRouteV2 = false //default is ODS ContainerLogsRouteADX = false //default is LA @@ -1365,7 +1377,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { populateExcludedStdoutNamespaces() populateExcludedStderrNamespaces() - if enrichContainerLogs == true { + if enrichContainerLogs == true && ContainerLogsRouteADX != true { Log("ContainerLogEnrichment=true; starting goroutine to update containerimagenamemaps \n") go updateContainerImageNameMaps() } else { diff --git a/source/plugins/go/src/utils.go b/source/plugins/go/src/utils.go index 8b1a3df65..91791ae1a 100644 --- a/source/plugins/go/src/utils.go +++ b/source/plugins/go/src/utils.go @@ -145,7 +145,7 @@ func CreateADXClient() { //log.Fatalf("Unable to create ADX connection %s", err.Error()) } else { Log("Successfully created ADX Client. 
Creating Ingestor...") - ingestor, ingestorErr := ingest.New(client, "containerinsights", "ContainerLog") + ingestor, ingestorErr := ingest.New(client, "containerinsights", "ContainerLogv2") if ingestorErr != nil { Log("Error::mdsd::Unable to create ADX ingestor %s", ingestorErr.Error()) } else { diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 9e0935480..67bd61667 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -248,7 +248,9 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met telemetryProps["dsPromUrl"] = @dsPromUrlCount end #telemetry about containerlogs Routing for daemonset - if (!@containerLogsRoute.nil? && !@containerLogsRoute.empty?) + if File.exist?(Constants::AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2_FILENAME) + telemetryProps["containerLogsRoute"] = "v2" + elsif (!@containerLogsRoute.nil? && !@containerLogsRoute.empty?) 
telemetryProps["containerLogsRoute"] = @containerLogsRoute end #telemetry about health model diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 35e5f9334..0e5099c5e 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -91,4 +91,6 @@ class Constants #Pod Statuses POD_STATUS_TERMINATING = "Terminating" + + AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2_FILENAME = "/opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2" end From 49ec064fd7480de98ed1e08d4e6fb8ddd5726015 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 27 Oct 2020 20:48:43 -0700 Subject: [PATCH 05/18] merge from ci_dev to ci_prod (#463) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to install docker * fix: Pin to a particular version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check 
* remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics (#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod branch * fix links * fix image link * some more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments * Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to 
release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. * add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. 
Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback * Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. 
* Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link * chart update for sept2020 release (#457) * add missing version update in the script (#458) * November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart * remove hiphen for params in chart (#462) Merging as its a simple fix (remove hiphen) Co-authored-by: Ganga Mahesh Siddem Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner --- .../templates/omsagent-daemonset-windows.yaml | 2 +- .../azuremonitor-containers/templates/omsagent-daemonset.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index c916fadf6..6a309c121 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -46,7 +46,7 @@ spec: {{- end }} imagePullPolicy: IfNotPresent resources: -{{ toYaml .Values.omsagent.resources.daemonset-windows | indent 9 }} +{{ toYaml .Values.omsagent.resources.daemonsetwindows | indent 9 }} env: {{- if ne .Values.omsagent.env.clusterId "" }} - name: AKS_RESOURCE_ID diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 8af13b6ee..d57c4d82b 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -40,7 +40,7 @@ spec: {{- end }} imagePullPolicy: IfNotPresent resources: -{{ toYaml 
.Values.omsagent.resources.daemonset-linux | indent 9 }} +{{ toYaml .Values.omsagent.resources.daemonsetlinux | indent 9 }} env: {{- if ne .Values.omsagent.env.clusterId "" }} - name: AKS_RESOURCE_ID diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index fa01c05bd..774e6203f 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -116,14 +116,14 @@ omsagent: ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: - daemonset-linux: + daemonsetlinux: requests: cpu: 75m memory: 225Mi limits: cpu: 150m memory: 600Mi - daemonset-windows: + daemonsetwindows: limits: cpu: 200m memory: 600Mi From 387d6c90ecd8962d0ff4118c370831e46636b388 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 27 Oct 2020 21:39:27 -0700 Subject: [PATCH 06/18] Merge from ci_dev into ci_prod (#464) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to install docker * fix: Pin to a particular version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * Gangams/add containerd support to windows agent 
(#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics (#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod branch * fix links * fix image link * some more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments * Gangams/fix weird conflicts (#439) * 
separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. * add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. 
Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback * Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. 
* Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link * chart update for sept2020 release (#457) * add missing version update in the script (#458) * November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart * remove hiphen for params in chart (#462) Merging as its a simple fix (remove hiphen) * Changes for cutting a new build for ciprod10272020 release (#460) Co-authored-by: Ganga Mahesh Siddem Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner --- ReleaseNotes.md | 10 ++++++++++ build/version | 6 +++--- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 12 ++++++------ kubernetes/windows/Dockerfile | 2 +- scripts/onboarding/managed/enable-monitoring.ps1 | 2 +- scripts/onboarding/managed/enable-monitoring.sh | 2 +- scripts/onboarding/managed/upgrade-monitoring.sh | 2 +- 10 files changed, 28 insertions(+), 18 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index e1892d083..eb8e282b9 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,16 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 10/27/2020 - +##### Version microsoft/oms:ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020 (linux) +##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) +##### Code change log +- Activate oneagent in few AKS regions (koreacentral,norwayeast) +- Disable syslog +- Fix timeout for Windows daemonset liveness probe +- Make request == limit for Windows daemonset resources (cpu & memory) +- Schema v2 for container log (ADX only - applicable only for select customers for piloting) + ### 10/05/2020 - ##### Version microsoft/oms:ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020 (linux) ##### Version microsoft/oms:win-ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) diff --git a/build/version b/build/version index 9587328de..71c70020e 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=10 -CONTAINER_BUILDVERSION_MINOR=1 +CONTAINER_BUILDVERSION_MAJOR=11 +CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20201005 +CONTAINER_BUILDVERSION_DATE=20201027 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 6d45b05d8..bc35690e4 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.7 
+version: 2.7.8 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 774e6203f..0f07a98c1 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,10 +12,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod10052020" - tagWindows: "win-ciprod10052020" + tag: "ciprod10272020" + tagWindows: "win-ciprod10272020" pullPolicy: IfNotPresent - dockerProviderVersion: "10.1.0-0" + dockerProviderVersion: "11.0.0-0" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index f4324a18a..c3428a44a 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod10052020 +ARG IMAGE_TAG=ciprod10272020 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 61f89b808..ca47d898d 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -337,13 +337,13 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.1.0-0" + dockerProviderVersion: "11.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020" imagePullPolicy: IfNotPresent resources: limits: @@ -496,13 +496,13 @@ spec: rsName: 
"omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.1.0-0" + dockerProviderVersion: "11.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020" imagePullPolicy: IfNotPresent resources: limits: @@ -642,13 +642,13 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "10.1.0-0" + dockerProviderVersion: "11.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10272020" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index c7dee60af..414817559 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod10052020 +ARG IMAGE_TAG=win-ciprod10272020 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 4815dc958..22d34894f 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -60,7 +60,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.7.7" +$mcrChartVersion = "2.7.8" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." 
diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index d7edf49dc..e0d26c370 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -42,7 +42,7 @@ set -o pipefail defaultAzureCloud="AzureCloud" # released chart version in mcr -mcrChartVersion="2.7.7" +mcrChartVersion="2.7.8" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 23594c7bc..4134d710f 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.7.7" +mcrChartVersion="2.7.8" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" From b448644924cffcb034ab4be2ca89c59ca7eaac25 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Mon, 9 Nov 2020 17:59:24 -0800 Subject: [PATCH 07/18] Merge from ci_dev into ci_prod (#469) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to install docker * fix: Pin to a particular 
version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics (#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod branch * fix links * fix image link * some 
more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments * Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. 
* add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback * Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. 
* Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link * chart update for sept2020 release (#457) * add missing version update in the script (#458) * November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart * remove hiphen for params in chart (#462) Merging as its a simple fix (remove hiphen) * Changes for cutting a new build for ciprod10272020 release (#460) * using latest stable version of msys2 (#465) * fixing the windows-perf-dups (#466) * chart updates related to new microsoft/charts repo (#467) * Changes for creating 11092020 release (#468) Co-authored-by: Ganga Mahesh Siddem Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner --- ReleaseNotes.md | 6 ++++++ build/version | 4 ++-- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/README.md | 18 ++++++++++-------- .../templates/NOTES.txt | 2 +- charts/azuremonitor-containers/values.yaml | 6 +++--- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 12 ++++++------ kubernetes/windows/Dockerfile | 4 ++-- .../onboarding/managed/enable-monitoring.ps1 | 2 +- .../onboarding/managed/enable-monitoring.sh | 2 +- .../onboarding/managed/upgrade-monitoring.sh | 2 +- source/plugins/ruby/in_win_cadvisor_perf.rb | 11 +++++------ 13 files changed, 40 insertions(+), 33 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index eb8e282b9..ddfd01314 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,12 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 11/09/2020 - +##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020 (linux) +##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020 (windows) +##### Code change log +- Fix for duplicate windows metrics + ### 10/27/2020 - ##### Version microsoft/oms:ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020 (linux) ##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) diff --git a/build/version b/build/version index 71c70020e..a8b78ecac 100644 --- a/build/version +++ b/build/version @@ -5,8 +5,8 @@ CONTAINER_BUILDVERSION_MAJOR=11 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 -CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20201027 +CONTAINER_BUILDVERSION_BUILDNR=1 +CONTAINER_BUILDVERSION_DATE=20201109 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index bc35690e4..987841f77 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.8 +version: 2.7.9 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/README.md b/charts/azuremonitor-containers/README.md index 3b357ffd5..469fac94a 100644 --- a/charts/azuremonitor-containers/README.md +++ b/charts/azuremonitor-containers/README.md @@ -29,6 +29,8 @@ Monitoring your Kubernetes cluster and containers is critical, especially when r ## Installing the 
Chart +> Note: If you want to customize the chart, fork the chart code in https://github.com/microsoft/Docker-Provider/tree/ci_prod/charts/azuremonitor-containers + > Note: `--name` flag not required in Helm3 since this flag is deprecated > Note: use `omsagent.proxy` parameter to set the proxy endpoint if your K8s cluster configured behind the proxy. Refer to [configure proxy](#Configuring-Proxy-Endpoint) for more details about proxy. @@ -36,25 +38,25 @@ Monitoring your Kubernetes cluster and containers is critical, especially when r ### To Use Azure Log Analytics Workspace in Public Cloud ```bash -$ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/ +$ helm repo add microsoft https://microsoft.github.io/charts/repo $ helm install --name azmon-containers-release-1 \ ---set omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= incubator/azuremonitor-containers +--set omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= microsoft/azuremonitor-containers ``` ### To Use Azure Log Analytics Workspace in Azure China Cloud ```bash -$ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/ +$ helm repo add microsoft https://microsoft.github.io/charts/repo $ helm install --name azmon-containers-release-1 \ ---set omsagent.domain=opinsights.azure.cn,omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= incubator/azuremonitor-containers +--set omsagent.domain=opinsights.azure.cn,omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= microsoft/azuremonitor-containers ``` ### To Use Azure Log Analytics Workspace in Azure US Government Cloud ```bash -$ helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/ +$ helm repo add microsoft https://microsoft.github.io/charts/repo $ helm install --name azmon-containers-release-1 \ ---set omsagent.domain=opinsights.azure.us,omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= 
incubator/azuremonitor-containers +--set omsagent.domain=opinsights.azure.us,omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= microsoft/azuremonitor-containers ``` ## Upgrading an existing Release to a new version @@ -112,13 +114,13 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm $ helm install --name myrelease-1 \ --set omsagent.secret.wsid=,omsagent.secret.key=,omsagent.env.clusterName= - incubator/azuremonitor-containers + microsoft/azuremonitor-containers ``` Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, ```bash -$ helm install --name myrelease-1 -f values.yaml incubator/azuremonitor-containers +$ helm install --name myrelease-1 -f values.yaml microsoft/azuremonitor-containers ``` diff --git a/charts/azuremonitor-containers/templates/NOTES.txt b/charts/azuremonitor-containers/templates/NOTES.txt index 372cecb95..48ebf33fc 100644 --- a/charts/azuremonitor-containers/templates/NOTES.txt +++ b/charts/azuremonitor-containers/templates/NOTES.txt @@ -29,7 +29,7 @@ This deployment will not complete. 
To proceed, run --set omsagent.secret.wsid= \ --set omsagent.secret.key= \ --set omsagent.env.clusterName= \ - incubator/azuremonitor-containers + microsoft/azuremonitor-containers {{- else -}} diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 0f07a98c1..76ea0a26d 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,10 +12,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod10272020" - tagWindows: "win-ciprod10272020" + tag: "ciprod11092020" + tagWindows: "win-ciprod11092020" pullPolicy: IfNotPresent - dockerProviderVersion: "11.0.0-0" + dockerProviderVersion: "11.0.0-1" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index c3428a44a..d04e86128 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod10272020 +ARG IMAGE_TAG=ciprod11092020 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index ca47d898d..7d07eafcd 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -337,13 +337,13 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-0" + dockerProviderVersion: "11.0.0-1" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020" + image: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" imagePullPolicy: IfNotPresent resources: limits: @@ -496,13 +496,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-0" + dockerProviderVersion: "11.0.0-1" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" imagePullPolicy: IfNotPresent resources: limits: @@ -642,13 +642,13 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-0" + dockerProviderVersion: "11.0.0-1" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10272020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 414817559..10ea235b2 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,14 +3,14 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod10272020 +ARG IMAGE_TAG=win-ciprod11092020 # Do not split this into multiple RUN! 
# Docker creates a layer for every RUN-Statement RUN powershell -Command "Set-ExecutionPolicy Bypass -Scope Process -Force; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" # Fluentd depends on cool.io whose fat gem is only available for Ruby < 2.5, so need to specify --platform ruby when install Ruby > 2.5 and install msys2 to get dev tools RUN choco install -y ruby --version 2.6.5.1 --params "'/InstallDir:C:\ruby26'" \ -&& choco install -y msys2 --version 20190524.0.0.20191030 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ +&& choco install -y msys2 --version 20200903.0.0 --params "'/NoPath /NoUpdate /InstallDir:C:\ruby26\msys64'" \ && choco install -y vim # gangams - optional MSYS2 update via ridk failing in merged docker file so skipping that since we dont need optional update diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 22d34894f..b052f22c5 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -60,7 +60,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.7.8" +$mcrChartVersion = "2.7.9" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index e0d26c370..bb6974258 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -42,7 +42,7 @@ set -o pipefail defaultAzureCloud="AzureCloud" # released chart version in mcr -mcrChartVersion="2.7.8" +mcrChartVersion="2.7.9" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." 
diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 4134d710f..11ecf6819 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.7.8" +mcrChartVersion="2.7.9" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" diff --git a/source/plugins/ruby/in_win_cadvisor_perf.rb b/source/plugins/ruby/in_win_cadvisor_perf.rb index 4e90195e5..9c267cf4f 100644 --- a/source/plugins/ruby/in_win_cadvisor_perf.rb +++ b/source/plugins/ruby/in_win_cadvisor_perf.rb @@ -10,7 +10,7 @@ class Win_CAdvisor_Perf_Input < Input def initialize super require "yaml" - require 'yajl/json_gem' + require "yajl/json_gem" require "time" require_relative "CAdvisorMetricsAPIClient" @@ -52,8 +52,6 @@ def shutdown def enumerate() time = Time.now.to_f begin - eventStream = MultiEventStream.new - insightsMetricsEventStream = MultiEventStream.new timeDifference = (DateTime.now.to_time.to_i - @@winNodeQueryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 @@istestvar = ENV["ISTEST"] @@ -70,6 +68,7 @@ def enumerate() @@winNodeQueryTimeTracker = DateTime.now.to_time.to_i end @@winNodes.each do |winNode| + eventStream = MultiEventStream.new metricData = CAdvisorMetricsAPIClient.getMetrics(winNode: winNode, metricTime: Time.now.utc.iso8601) metricData.each do |record| if !record.empty? @@ -81,7 +80,6 @@ def enumerate() router.emit_stream(@tag, eventStream) if eventStream router.emit_stream(@mdmtag, eventStream) if eventStream - if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && eventStream.count > 0) $log.info("winCAdvisorPerfEmitStreamSuccess @ #{Time.now.utc.iso8601}") end @@ -90,6 +88,7 @@ def enumerate() begin containerGPUusageInsightsMetricsDataItems = [] containerGPUusageInsightsMetricsDataItems.concat(CAdvisorMetricsAPIClient.getInsightsMetrics(winNode: winNode, metricTime: Time.now.utc.iso8601)) + insightsMetricsEventStream = MultiEventStream.new containerGPUusageInsightsMetricsDataItems.each do |insightsMetricsRecord| wrapper = { @@ -104,12 +103,12 @@ def enumerate() router.emit_stream(@mdmtag, insightsMetricsEventStream) if insightsMetricsEventStream if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) $log.info("winCAdvisorInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end + end rescue => errorStr $log.warn "Failed when processing GPU Usage metrics in_win_cadvisor_perf : #{errorStr}" $log.debug_backtrace(errorStr.backtrace) ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end + end #end GPU InsightsMetrics items end From 9287425be5792c4d3ea92a9708dc411d79350cf4 Mon Sep 17 00:00:00 2001 From: Ganga Mahesh Siddem Date: Wed, 13 Jan 2021 14:05:36 -0800 Subject: [PATCH 08/18] Gangams/changes for release ciprod01112021 (#490) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script 
updates * fix scripts to install docker * fix: Pin to a particular version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics (#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the 
links to use ci_prod branch * fix links * fix image link * some more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments * Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. 
* add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback * Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. 
* Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link * chart update for sept2020 release (#457) * add missing version update in the script (#458) * November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart * remove hiphen for params in chart (#462) Merging as its a simple fix (remove hiphen) * Changes for cutting a new build for ciprod10272020 release (#460) * using latest stable version of msys2 (#465) * fixing the windows-perf-dups (#466) * chart updates related to new microsoft/charts repo (#467) * Changes for creating 11092020 release (#468) * MDM exception aggregation (#470) * grwehner/mdm custom metric regions (#471) Remove custom metrics region check for public cloud * updaitng rs limit to 1gb (#474) * grwehner/pv inventory (#455) Add fluentd plugin to request persistent volume info from the kubernetes api and send to LA * Gangams/fix for build release pipeline issue (#476) * use isolated cdpx acr * correct comment * add pv fluentd plugin config to helm rs config (#477) * add pv fluentd plugin to helm rs config * helm rbac permissions for pv api calls * Gangams/fix rs ooming (#473) * optimize kpi * optimize kube node inventory * add flags for events, deployments and hpa * have separate function parseNodeLimits * refactor code * fix crash * fix bug with service name * fix bugs related to get service name * update oom fix test agent * debug logs * fix service label issue * update to latest agent and enable ephemeral annotation * change stream size to 200 from 250 * update yaml * adjust chunksizes * add ruby gc env * yaml changes for cioomtest11282020-3 * telemetry to track pods latency * service count telemetry * rename variables * wip * nodes inventory telemetry * configmap changes * add emit streams in configmap * yaml updates * fix copy and paste bug * add 
todo comments * fix node latency telemetry bug * update yaml with latest test image * fix bug * upping rs memory change * fix mdm bug with final emit stream * update to latest image * fix pr feedback * fix pr feedback * rename health config to agent config * fix max allowed hpa chunk size * update to use 1k pod chunk since validated on 1.18+ * remove debug logs * minor updates * move defaults to common place * chart updates * final oomfix agent * update to use prod image so that can be validated with build pipeline * fix typo in comment * Gangams/enable arc onboarding to ff (#478) * wip * updates * trigger login if the ctx cloud not same as specified cloud * add missed commit * Convert PV type dictionary to json for telemetry so it shows up in logs (#480) * fix 2 windows tasks - 1) Dont log to termination log 2) enable ADX route for containerlogs in windows (for O365) (#482) * fix ci envvar collection in large pods (#483) * grwehner/jan agent tasks (#481) - Windows agent fix to use log filtering settings in config map. - Error handling for kubelet_utils get_node_capacity in case /metrics/cadvsior endpoint fails. 
- Remove env variable for workspace key for windows agent * updating fbit version and cpu limit (#485) * reverting to older version (#487) * Gangams/add fbsettings configurable via configmap (#486) * wip * fbit config settings * add config warn message * handle one config provided but not other * fixed pr feedback * fix copy paste error * rename config parameter names * fix typo * fix fbit crash in helm path * fix nil check * Gangams/jan agent release tasks (#484) * wip * explicit amd64 affinity for hybrid workloads * fix space issue * wip * revert vscode setting file * remove per container logs in ci (#488) * updates for ciprod01112021 release Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner --- .pipelines/get-aad-app-creds-from-kv.sh | 14 + ...rom-cdpx-and-push-to-ci-acr-linux-image.sh | 15 +- ...m-cdpx-and-push-to-ci-acr-windows-image.sh | 14 +- ReleaseNotes.md | 78 ++ .../scripts/td-agent-bit-conf-customizer.rb | 11 +- build/common/installer/scripts/tomlparser.rb | 4 +- build/linux/installer/conf/container.conf | 2 - build/linux/installer/conf/kube.conf | 26 +- .../installer/datafiles/base_container.data | 3 +- .../scripts/tomlparser-agent-config.rb | 220 ++++++ .../scripts/tomlparser-health-config.rb | 73 -- build/version | 6 +- .../installer/certificategenerator/Program.cs | 8 +- build/windows/installer/conf/fluent.conf | 12 +- .../installer/scripts/livenessprobe.cmd | 24 +- charts/azuremonitor-containers/Chart.yaml | 2 +- .../templates/omsagent-daemonset-windows.yaml | 4 + .../templates/omsagent-daemonset.yaml | 4 + .../templates/omsagent-rbac.yaml | 2 +- .../templates/omsagent-rs-configmap.yaml | 34 +- charts/azuremonitor-containers/values.yaml | 44 +- kubernetes/linux/Dockerfile | 3 +- kubernetes/linux/main.sh | 36 +- kubernetes/linux/setup.sh | 2 +- kubernetes/omsagent.yaml | 69 +- kubernetes/windows/Dockerfile | 2 +- 
kubernetes/windows/main.ps1 | 18 +- .../onboarding/managed/disable-monitoring.ps1 | 34 +- .../onboarding/managed/disable-monitoring.sh | 17 + .../onboarding/managed/enable-monitoring.ps1 | 49 +- .../onboarding/managed/enable-monitoring.sh | 40 +- .../onboarding/managed/upgrade-monitoring.sh | 21 +- .../health/omsagent-template-aks-engine.yaml | 2 - scripts/preview/health/omsagent-template.yaml | 2 - source/plugins/ruby/CustomMetricsUtils.rb | 12 +- source/plugins/ruby/KubernetesApiClient.rb | 387 +++++----- source/plugins/ruby/constants.rb | 116 +-- source/plugins/ruby/filter_cadvisor2mdm.rb | 15 +- source/plugins/ruby/filter_inventory2mdm.rb | 3 +- source/plugins/ruby/filter_telegraf2mdm.rb | 3 +- source/plugins/ruby/in_kube_events.rb | 18 +- source/plugins/ruby/in_kube_nodes.rb | 410 ++++++---- source/plugins/ruby/in_kube_podinventory.rb | 720 ++++++++++-------- source/plugins/ruby/in_kube_pvinventory.rb | 253 ++++++ .../plugins/ruby/in_kubestate_deployments.rb | 424 ++++++----- source/plugins/ruby/in_kubestate_hpa.rb | 421 +++++----- .../ruby/kubernetes_container_inventory.rb | 53 +- source/plugins/ruby/out_mdm.rb | 51 +- source/plugins/ruby/podinventory_to_mdm.rb | 4 +- 49 files changed, 2428 insertions(+), 1357 deletions(-) create mode 100644 build/linux/installer/scripts/tomlparser-agent-config.rb delete mode 100644 build/linux/installer/scripts/tomlparser-health-config.rb create mode 100644 source/plugins/ruby/in_kube_pvinventory.rb diff --git a/.pipelines/get-aad-app-creds-from-kv.sh b/.pipelines/get-aad-app-creds-from-kv.sh index 8ef56cddb..a0ba464cc 100755 --- a/.pipelines/get-aad-app-creds-from-kv.sh +++ b/.pipelines/get-aad-app-creds-from-kv.sh @@ -11,6 +11,8 @@ do KV) KV=$VALUE ;; KVSECRETNAMEAPPID) AppId=$VALUE ;; KVSECRETNAMEAPPSECRET) AppSecret=$VALUE ;; + KVSECRETNAMECDPXAPPID) CdpxAppId=$VALUE ;; + KVSECRETNAMECDPXAPPSECRET) CdpxAppSecret=$VALUE ;; *) esac done @@ -27,4 +29,16 @@ az keyvault secret download --file ~/acrappsecret --vault-name ${KV} 
--name ${A echo "downloaded the appsecret from KV:${KV} and KV secret:${AppSecret}" +echo "key vault secret name for cdpx appid:${KVSECRETNAMECDPXAPPID}" + +echo "key vault secret name for cdpx appsecret:${KVSECRETNAMECDPXAPPSECRET}" + +az keyvault secret download --file ~/cdpxacrappid --vault-name ${KV} --name ${CdpxAppId} + +echo "downloaded the appid from KV:${KV} and KV secret:${CdpxAppId}" + +az keyvault secret download --file ~/cdpxacrappsecret --vault-name ${KV} --name ${CdpxAppSecret} + +echo "downloaded the appsecret from KV:${KV} and KV secret:${CdpxAppSecret}" + echo "end: get app id and secret from specified key vault" diff --git a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh index 638d3a937..3844ea185 100755 --- a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh +++ b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh @@ -25,12 +25,21 @@ ACR_APP_ID=$(cat ~/acrappid) ACR_APP_SECRET=$(cat ~/acrappsecret) echo "end: read appid and appsecret" +echo "start: read appid and appsecret for cdpx" +CDPX_ACR_APP_ID=$(cat ~/cdpxacrappid) +CDPX_ACR_APP_SECRET=$(cat ~/cdpxacrappsecret) +echo "end: read appid and appsecret which has read access on cdpx acr" + + +# Name of CDPX_ACR should be in this format :Naming convention: 'cdpx' + service tree id without '-' + two digit suffix like'00'/'01 +# suffix 00 primary and 01 secondary, and we only use primary +# This configured via pipeline variable echo "login to cdpxlinux acr:${CDPX_ACR}" -docker login $CDPX_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET +docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password $CDPX_ACR_APP_SECRET echo "login to cdpxlinux acr completed: ${CDPX_ACR}" echo "pull agent image from cdpxlinux acr: ${CDPX_ACR}" -docker pull ${CDPX_ACR}/artifact/3170cdd2-19f0-4027-912b-1027311691a2/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} +docker pull 
${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} echo "pull image from cdpxlinux acr completed: ${CDPX_ACR}" echo "CI Release name is:"$CI_RELEASE @@ -41,7 +50,7 @@ echo "CI ACR : ${CI_ACR}" echo "CI AGENT REPOSITORY NAME : ${CI_AGENT_REPO}" echo "tag linux agent image" -docker tag ${CDPX_ACR}/artifact/3170cdd2-19f0-4027-912b-1027311691a2/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} +docker tag ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} echo "login ciprod acr":$CI_ACR docker login $CI_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET diff --git a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh index 066410af5..095a00039 100755 --- a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh +++ b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh @@ -25,12 +25,20 @@ ACR_APP_ID=$(cat ~/acrappid ) ACR_APP_SECRET=$(cat ~/acrappsecret) echo "end: read appid and appsecret" +echo "start: read appid and appsecret for cdpx" +CDPX_ACR_APP_ID=$(cat ~/cdpxacrappid) +CDPX_ACR_APP_SECRET=$(cat ~/cdpxacrappsecret) +echo "end: read appid and appsecret which has read access on cdpx acr" + +# Name of CDPX_ACR should be in this format :Naming convention: 'cdpx' + service tree id without '-' + two digit suffix like'00'/'01 +# suffix 00 primary and 01 secondary, and we only use primary +# This configured via pipeline variable echo "login to cdpxwindows acr:${CDPX_ACR}" -docker login $CDPX_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET +docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password $CDPX_ACR_APP_SECRET echo "login to cdpxwindows acr:${CDPX_ACR} completed" echo "pull image from cdpxwin acr: ${CDPX_ACR}" -docker pull 
${CDPX_ACR}/artifact/3170cdd2-19f0-4027-912b-1027311691a2/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} +docker pull ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} echo "pull image from cdpxwin acr completed: ${CDPX_ACR}" echo "CI Release name:"$CI_RELEASE @@ -40,7 +48,7 @@ imagetag="win-"$CI_RELEASE$CI_IMAGE_TAG_SUFFIX echo "agentimagetag="$imagetag echo "tag windows agent image" -docker tag ${CDPX_ACR}/artifact/3170cdd2-19f0-4027-912b-1027311691a2/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} +docker tag ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} echo "login to ${CI_ACR} acr" docker login $CI_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET diff --git a/ReleaseNotes.md b/ReleaseNotes.md index ddfd01314..2afe16481 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -10,6 +10,84 @@ additional questions or comments. 
## Release History Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 01/11/2021 - +##### Version microsoft/oms:ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021 (linux) +##### Version microsoft/oms:win-ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021 (windows) +##### Code change log +- Fixes for Linux Agent Replicaset Pod OOMing issue +- Update fluentbit (1.14.2 to 1.6.8) for the Linux Daemonset +- Make Fluentbit settings: log_flush_interval_secs, tail_buf_chunksize_megabytes and tail_buf_maxsize_megabytes configurable via configmap +- Support for PV inventory collection +- Removal of Custom metric region check for Public cloud regions and update to use cloud environment variable to determine the custom metric support +- For daemonset pods, add the dnsconfig to use ndots: 3 from ndots:5 to optimize the number of DNS API calls made +- Fix for inconsistency in the collection container environment variables for the pods which has high number of containers +- Fix for disabling of std{out;err} log_collection_settings via configmap issue in windows daemonset +- Update to use workspace key from mount file rather than environment variable for windows daemonset agent +- Remove per container info logs in the container inventory +- Enable ADX route for windows container logs +- Remove logging to termination log in windows agent liveness probe + + +### 11/09/2020 - +##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020 (linux) +##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020 (windows) +##### Code change log +- Fix for duplicate windows metrics + +### 10/27/2020 - +##### Version microsoft/oms:ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020 (linux) +##### Version microsoft/oms:win-ciprod10272020 Version 
mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) +##### Code change log +- Activate oneagent in few AKS regions (koreacentral,norwayeast) +- Disable syslog +- Fix timeout for Windows daemonset liveness probe +- Make request == limit for Windows daemonset resources (cpu & memory) +- Schema v2 for container log (ADX only - applicable only for select customers for piloting) + +### 10/05/2020 - +##### Version microsoft/oms:ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020 (linux) +##### Version microsoft/oms:win-ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) +##### Code change log +- Health CRD to version v1 (from v1beta1) for k8s versions >= 1.19.0 +- Collection of PV usage metrics for PVs mounted by pods (kube-system pods excluded by default)(doc-link-needed) +- Zero fill few custom metrics under a timer, also add zero filling for new PV usage metrics +- Collection of additional Kubelet metrics ('kubelet_running_pod_count','volume_manager_total_volumes','kubelet_node_config_error','process_resident_memory_bytes','process_cpu_seconds_total','kubelet_runtime_operations_total','kubelet_runtime_operations_errors_total'). This also includes updates to 'kubelet' workbook to include these new metrics +- Collection of Azure NPM (Network Policy Manager) metrics (basic & advanced. By default, NPM metrics collection is turned OFF)(doc-link-needed) +- Support log collection when docker root is changed with knode. 
Tracked by [this](https://github.com/Azure/AKS/issues/1373) issue
+- Support for Pods in 'Terminating' state for nodelost scenarios
+- Fix for reduction in telemetry for custom metrics ingestion failures
+- Fix CPU capacity/limits metrics being 0 for Virtual nodes (VK)
+- Add new custom metric regions (eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth)
+- Enable strict SSL validation for AppInsights Ruby SDK
+- Turn off custom metrics upload for unsupported cluster types
+- Install CA certs from wire server for windows (in certain clouds)
+
+### 09/16/2020 -
+> Note: This agent release targeted ONLY for non-AKS clusters via Azure Monitor for containers HELM chart update
+##### Version microsoft/oms:ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020 (linux)
+##### Version microsoft/oms:win-ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod09162020 (windows)
+##### Code change log
+- Collection of Azure Network Policy Manager Basic and Advanced metrics
+- Add support in Windows Agent for Container log collection of CRI runtimes such as ContainerD
+- Alertable metrics support Arc K8s cluster to parity with AKS
+- Support for multiple container log mount paths when docker is updated through knode
+- Bug fix related to MDM telemetry
+
+### 08/07/2020 -
+##### Version microsoft/oms:ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020 (linux)
+##### Version microsoft/oms:win-ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020 (windows)
+##### Code change log
+- Collection of KubeState metrics for deployments and HPA
+- Add the Proxy support for Windows agent
+- Fix for ContainerState in ContainerInventory to handle Failed state and collection of environment variables for terminated and failed containers
+- Change /spec to /metrics/cadvisor endpoint to collect 
node capacity metrics +- Disable Health Plugin by default and can enabled via configmap +- Pin version of jq to 1.5+dfsg-2 +- Bug fix for showing node as 'not ready' when there is disk pressure +- oneagent integration (disabled by default) +- Add region check before sending alertable metrics to MDM +- Telemetry fix for agent telemetry for sov. clouds + ### 11/09/2020 - ##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020 (linux) diff --git a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb index fae3acb36..35b71e550 100644 --- a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb +++ b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb @@ -18,12 +18,17 @@ def substituteFluentBitPlaceHolders bufferChunkSize = ENV["FBIT_TAIL_BUFFER_CHUNK_SIZE"] bufferMaxSize = ENV["FBIT_TAIL_BUFFER_MAX_SIZE"] - serviceInterval = (!interval.nil? && is_number?(interval)) ? interval : @default_service_interval + serviceInterval = (!interval.nil? && is_number?(interval) && interval.to_i > 0 ) ? interval : @default_service_interval serviceIntervalSetting = "Flush " + serviceInterval - tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize)) ? bufferChunkSize : nil + tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize) && bufferChunkSize.to_i > 0) ? bufferChunkSize : nil - tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize)) ? bufferMaxSize : nil + tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize) && bufferMaxSize.to_i > 0) ? bufferMaxSize : nil + + if ((!tailBufferChunkSize.nil? && tailBufferMaxSize.nil?) || (!tailBufferChunkSize.nil? && !tailBufferMaxSize.nil? 
&& tailBufferChunkSize.to_i > tailBufferMaxSize.to_i)) + puts "config:warn buffer max size must be greater or equal to chunk size" + tailBufferMaxSize = tailBufferChunkSize + end text = File.read(@td_agent_bit_conf_path) new_contents = text.gsub("${SERVICE_FLUSH_INTERVAL}", serviceIntervalSetting) diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index 7235ee0c3..fe26f639e 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -228,7 +228,7 @@ def get_command_windows(env_variable_name, env_variable_value) file.write(commands) commands = get_command_windows('AZMON_LOG_TAIL_PATH', @logTailPath) file.write(commands) - commands = get_command_windows('AZMON_LOG_EXCLUSION_REGEX_PATTERN', @stdoutExcludeNamespaces) + commands = get_command_windows('AZMON_LOG_EXCLUSION_REGEX_PATTERN', @logExclusionRegexPattern) file.write(commands) commands = get_command_windows('AZMON_STDOUT_EXCLUDED_NAMESPACES', @stdoutExcludeNamespaces) file.write(commands) @@ -244,7 +244,7 @@ def get_command_windows(env_variable_name, env_variable_value) file.write(commands) commands = get_command_windows('AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS', @collectAllKubeEvents) file.write(commands) - commands = get_command_windows('AZMON_CONTAINER_LOGS_ROUTE', @containerLogsRoute) + commands = get_command_windows('AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE', @containerLogsRoute) file.write(commands) # Close file after writing all environment variables diff --git a/build/linux/installer/conf/container.conf b/build/linux/installer/conf/container.conf index f7e6e1da9..958a85eb6 100644 --- a/build/linux/installer/conf/container.conf +++ b/build/linux/installer/conf/container.conf @@ -45,14 +45,12 @@ #custom_metrics_mdm filter plugin type filter_cadvisor2mdm - custom_metrics_azure_regions 
eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,memoryRssBytes,pvUsedBytes log_level info type filter_telegraf2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level debug diff --git a/build/linux/installer/conf/kube.conf b/build/linux/installer/conf/kube.conf index dbb4db0da..fb566c360 100644 --- a/build/linux/installer/conf/kube.conf +++ b/build/linux/installer/conf/kube.conf @@ -13,7 +13,14 @@ tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth + + + #Kubernetes Persistent Volume inventory + + type kubepvinventory + tag oms.containerinsights.KubePVInventory + run_interval 60 + log_level debug #Kubernetes events @@ -66,14 +73,12 @@ type filter_inventory2mdm - custom_metrics_azure_regions 
eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info #custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes log_level info @@ -98,6 +103,21 @@ max_retry_wait 5m + + type out_oms + log_level debug + num_threads 5 + buffer_chunk_limit 4m + buffer_type file + buffer_path %STATE_DIR_WS%/state/out_oms_kubepv*.buffer + buffer_queue_limit 20 + buffer_queue_full_action drop_oldest_chunk + flush_interval 20s + retry_limit 10 + retry_wait 5s + max_retry_wait 5m + + type out_oms log_level debug diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index ca2538b79..c680f0eea 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -22,6 +22,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/microsoft/omsagent/plugin/filter_container.rb; source/plugins/ruby/filter_container.rb; 644; root; root /opt/microsoft/omsagent/plugin/in_kube_podinventory.rb; source/plugins/ruby/in_kube_podinventory.rb; 644; root; root +/opt/microsoft/omsagent/plugin/in_kube_pvinventory.rb; source/plugins/ruby/in_kube_pvinventory.rb; 644; root; root /opt/microsoft/omsagent/plugin/in_kube_events.rb; source/plugins/ruby/in_kube_events.rb; 644; 
root; root /opt/microsoft/omsagent/plugin/KubernetesApiClient.rb; source/plugins/ruby/KubernetesApiClient.rb; 644; root; root @@ -122,7 +123,7 @@ MAINTAINER: 'Microsoft Corporation' /opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root /opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root -/opt/tomlparser-health-config.rb; build/linux/installer/scripts/tomlparser-health-config.rb; 755; root; root +/opt/tomlparser-agent-config.rb; build/linux/installer/scripts/tomlparser-agent-config.rb; 755; root; root /opt/tomlparser.rb; build/common/installer/scripts/tomlparser.rb; 755; root; root /opt/td-agent-bit-conf-customizer.rb; build/common/installer/scripts/td-agent-bit-conf-customizer.rb; 755; root; root /opt/ConfigParseErrorLogger.rb; build/common/installer/scripts/ConfigParseErrorLogger.rb; 755; root; root diff --git a/build/linux/installer/scripts/tomlparser-agent-config.rb b/build/linux/installer/scripts/tomlparser-agent-config.rb new file mode 100644 index 000000000..e587909e5 --- /dev/null +++ b/build/linux/installer/scripts/tomlparser-agent-config.rb @@ -0,0 +1,220 @@ +#!/usr/local/bin/ruby + +#this should be require relative in Linux and require in windows, since it is a gem install on windows +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 + require "tomlrb" +else + require_relative "tomlrb" +end + +require_relative "ConfigParseErrorLogger" + +@configMapMountPath = "/etc/config/settings/agent-settings" +@configSchemaVersion = "" +@enable_health_model = false + +# 250 Node items (15KB per node) account to approximately 4MB +@nodesChunkSize = 250 +# 1000 pods (10KB per pod) account to approximately 10MB +@podsChunkSize = 1000 +# 4000 events (1KB per event) account to approximately 4MB +@eventsChunkSize = 4000 +# roughly each deployment is 8k +# 500 deployments account to approximately 4MB +@deploymentsChunkSize = 500 +# roughly each HPA is 3k +# 2000 HPAs account to approximately 6-7MB +@hpaChunkSize = 2000 +# stream batch sizes to avoid large file writes +# too low will consume higher disk iops +@podsEmitStreamBatchSize = 200 +@nodesEmitStreamBatchSize = 100 + +# higher the chunk size rs pod memory consumption higher and lower api latency +# similarly lower the value, helps on the memory consumption but incurrs additional round trip latency +# these needs to be tuned be based on the workload +# nodes +@nodesChunkSizeMin = 100 +@nodesChunkSizeMax = 400 +# pods +@podsChunkSizeMin = 250 +@podsChunkSizeMax = 1500 +# events +@eventsChunkSizeMin = 2000 +@eventsChunkSizeMax = 10000 +# deployments +@deploymentsChunkSizeMin = 500 +@deploymentsChunkSizeMax = 1000 +# hpa +@hpaChunkSizeMin = 500 +@hpaChunkSizeMax = 2000 + +# emit stream sizes to prevent lower values which costs disk i/o +# max will be upto the chunk size +@podsEmitStreamBatchSizeMin = 50 +@nodesEmitStreamBatchSizeMin = 50 + +# configmap settings related fbit config +@fbitFlushIntervalSecs = 0 +@fbitTailBufferChunkSizeMBs = 0 +@fbitTailBufferMaxSizeMBs = 0 + + +def is_number?(value) + true if Integer(value) rescue false +end + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@configMapMountPath)) + puts 
"config::configmap container-azm-ms-agentconfig for agent settings mounted, parsing values" + parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted config map" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for agent settings not mounted, using defaults" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for agent settings : #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + begin + if !parsedConfig.nil? && !parsedConfig[:agent_settings].nil? + if !parsedConfig[:agent_settings][:health_model].nil? && !parsedConfig[:agent_settings][:health_model][:enabled].nil? + @enable_health_model = parsedConfig[:agent_settings][:health_model][:enabled] + puts "enable_health_model = #{@enable_health_model}" + end + chunk_config = parsedConfig[:agent_settings][:chunk_config] + if !chunk_config.nil? + nodesChunkSize = chunk_config[:NODES_CHUNK_SIZE] + if !nodesChunkSize.nil? && is_number?(nodesChunkSize) && (@nodesChunkSizeMin..@nodesChunkSizeMax) === nodesChunkSize.to_i + @nodesChunkSize = nodesChunkSize.to_i + puts "Using config map value: NODES_CHUNK_SIZE = #{@nodesChunkSize}" + end + + podsChunkSize = chunk_config[:PODS_CHUNK_SIZE] + if !podsChunkSize.nil? && is_number?(podsChunkSize) && (@podsChunkSizeMin..@podsChunkSizeMax) === podsChunkSize.to_i + @podsChunkSize = podsChunkSize.to_i + puts "Using config map value: PODS_CHUNK_SIZE = #{@podsChunkSize}" + end + + eventsChunkSize = chunk_config[:EVENTS_CHUNK_SIZE] + if !eventsChunkSize.nil? 
&& is_number?(eventsChunkSize) && (@eventsChunkSizeMin..@eventsChunkSizeMax) === eventsChunkSize.to_i + @eventsChunkSize = eventsChunkSize.to_i + puts "Using config map value: EVENTS_CHUNK_SIZE = #{@eventsChunkSize}" + end + + deploymentsChunkSize = chunk_config[:DEPLOYMENTS_CHUNK_SIZE] + if !deploymentsChunkSize.nil? && is_number?(deploymentsChunkSize) && (@deploymentsChunkSizeMin..@deploymentsChunkSizeMax) === deploymentsChunkSize.to_i + @deploymentsChunkSize = deploymentsChunkSize.to_i + puts "Using config map value: DEPLOYMENTS_CHUNK_SIZE = #{@deploymentsChunkSize}" + end + + hpaChunkSize = chunk_config[:HPA_CHUNK_SIZE] + if !hpaChunkSize.nil? && is_number?(hpaChunkSize) && (@hpaChunkSizeMin..@hpaChunkSizeMax) === hpaChunkSize.to_i + @hpaChunkSize = hpaChunkSize.to_i + puts "Using config map value: HPA_CHUNK_SIZE = #{@hpaChunkSize}" + end + + podsEmitStreamBatchSize = chunk_config[:PODS_EMIT_STREAM_BATCH_SIZE] + if !podsEmitStreamBatchSize.nil? && is_number?(podsEmitStreamBatchSize) && + podsEmitStreamBatchSize.to_i <= @podsChunkSize && podsEmitStreamBatchSize.to_i >= @podsEmitStreamBatchSizeMin + @podsEmitStreamBatchSize = podsEmitStreamBatchSize.to_i + puts "Using config map value: PODS_EMIT_STREAM_BATCH_SIZE = #{@podsEmitStreamBatchSize}" + end + nodesEmitStreamBatchSize = chunk_config[:NODES_EMIT_STREAM_BATCH_SIZE] + if !nodesEmitStreamBatchSize.nil? && is_number?(nodesEmitStreamBatchSize) && + nodesEmitStreamBatchSize.to_i <= @nodesChunkSize && nodesEmitStreamBatchSize.to_i >= @nodesEmitStreamBatchSizeMin + @nodesEmitStreamBatchSize = nodesEmitStreamBatchSize.to_i + puts "Using config map value: NODES_EMIT_STREAM_BATCH_SIZE = #{@nodesEmitStreamBatchSize}" + end + end + # fbit config settings + fbit_config = parsedConfig[:agent_settings][:fbit_config] + if !fbit_config.nil? + fbitFlushIntervalSecs = fbit_config[:log_flush_interval_secs] + if !fbitFlushIntervalSecs.nil? 
&& is_number?(fbitFlushIntervalSecs) && fbitFlushIntervalSecs.to_i > 0 + @fbitFlushIntervalSecs = fbitFlushIntervalSecs.to_i + puts "Using config map value: log_flush_interval_secs = #{@fbitFlushIntervalSecs}" + end + + fbitTailBufferChunkSizeMBs = fbit_config[:tail_buf_chunksize_megabytes] + if !fbitTailBufferChunkSizeMBs.nil? && is_number?(fbitTailBufferChunkSizeMBs) && fbitTailBufferChunkSizeMBs.to_i > 0 + @fbitTailBufferChunkSizeMBs = fbitTailBufferChunkSizeMBs.to_i + puts "Using config map value: tail_buf_chunksize_megabytes = #{@fbitTailBufferChunkSizeMBs}" + end + + fbitTailBufferMaxSizeMBs = fbit_config[:tail_buf_maxsize_megabytes] + if !fbitTailBufferMaxSizeMBs.nil? && is_number?(fbitTailBufferMaxSizeMBs) && fbitTailBufferMaxSizeMBs.to_i > 0 + if fbitTailBufferMaxSizeMBs.to_i >= @fbitTailBufferChunkSizeMBs + @fbitTailBufferMaxSizeMBs = fbitTailBufferMaxSizeMBs.to_i + puts "Using config map value: tail_buf_maxsize_megabytes = #{@fbitTailBufferMaxSizeMBs}" + else + # tail_buf_maxsize_megabytes has to be greater or equal to tail_buf_chunksize_megabytes + @fbitTailBufferMaxSizeMBs = @fbitTailBufferChunkSizeMBs + puts "config::warn: tail_buf_maxsize_megabytes must be greater or equal to value of tail_buf_chunksize_megabytes. 
Using tail_buf_maxsize_megabytes = #{@fbitTailBufferMaxSizeMBs} since provided config value not valid" + end + end + # in scenario - tail_buf_chunksize_megabytes provided but not tail_buf_maxsize_megabytes to prevent fbit crash + if @fbitTailBufferChunkSizeMBs > 0 && @fbitTailBufferMaxSizeMBs == 0 + @fbitTailBufferMaxSizeMBs = @fbitTailBufferChunkSizeMBs + puts "config::warn: since tail_buf_maxsize_megabytes not provided hence using tail_buf_maxsize_megabytes=#{@fbitTailBufferMaxSizeMBs} which is same as the value of tail_buf_chunksize_megabytes" + end + end + end + rescue => errorStr + puts "config::error:Exception while reading config settings for agent configuration setting - #{errorStr}, using defaults" + @enable_health_model = false + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Config Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@configMapMountPath)) + ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") + end + @enable_health_model = false +end + +# Write the settings to file, so that they can be set as environment variables +file = File.open("agent_config_env_var", "w") + +if !file.nil? 
+ file.write("export AZMON_CLUSTER_ENABLE_HEALTH_MODEL=#{@enable_health_model}\n") + file.write("export NODES_CHUNK_SIZE=#{@nodesChunkSize}\n") + file.write("export PODS_CHUNK_SIZE=#{@podsChunkSize}\n") + file.write("export EVENTS_CHUNK_SIZE=#{@eventsChunkSize}\n") + file.write("export DEPLOYMENTS_CHUNK_SIZE=#{@deploymentsChunkSize}\n") + file.write("export HPA_CHUNK_SIZE=#{@hpaChunkSize}\n") + file.write("export PODS_EMIT_STREAM_BATCH_SIZE=#{@podsEmitStreamBatchSize}\n") + file.write("export NODES_EMIT_STREAM_BATCH_SIZE=#{@nodesEmitStreamBatchSize}\n") + # fbit settings + if @fbitFlushIntervalSecs > 0 + file.write("export FBIT_SERVICE_FLUSH_INTERVAL=#{@fbitFlushIntervalSecs}\n") + end + if @fbitTailBufferChunkSizeMBs > 0 + file.write("export FBIT_TAIL_BUFFER_CHUNK_SIZE=#{@fbitTailBufferChunkSizeMBs}\n") + end + if @fbitTailBufferMaxSizeMBs > 0 + file.write("export FBIT_TAIL_BUFFER_MAX_SIZE=#{@fbitTailBufferMaxSizeMBs}\n") + end + # Close file after writing all environment variables + file.close +else + puts "Exception while opening file for writing config environment variables" + puts "****************End Config Processing********************" +end diff --git a/build/linux/installer/scripts/tomlparser-health-config.rb b/build/linux/installer/scripts/tomlparser-health-config.rb deleted file mode 100644 index 14c8bdb44..000000000 --- a/build/linux/installer/scripts/tomlparser-health-config.rb +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/local/bin/ruby - -#this should be require relative in Linux and require in windows, since it is a gem install on windows -@os_type = ENV["OS_TYPE"] -if !@os_type.nil? && !@os_type.empty? 
&& @os_type.strip.casecmp("windows") == 0 - require "tomlrb" -else - require_relative "tomlrb" -end - -require_relative "ConfigParseErrorLogger" - -@configMapMountPath = "/etc/config/settings/agent-settings" -@configSchemaVersion = "" -@enable_health_model = false - -# Use parser to parse the configmap toml file to a ruby structure -def parseConfigMap - begin - # Check to see if config map is created - if (File.file?(@configMapMountPath)) - puts "config::configmap container-azm-ms-agentconfig for agent health settings mounted, parsing values" - parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) - puts "config::Successfully parsed mounted config map" - return parsedConfig - else - puts "config::configmap container-azm-ms-agentconfig for agent health settings not mounted, using defaults" - return nil - end - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config map for enabling health: #{errorStr}, using defaults, please check config map for errors") - return nil - end -end - -# Use the ruby structure created after config parsing to set the right values to be used as environment variables -def populateSettingValuesFromConfigMap(parsedConfig) - begin - if !parsedConfig.nil? && !parsedConfig[:agent_settings].nil? && !parsedConfig[:agent_settings][:health_model].nil? && !parsedConfig[:agent_settings][:health_model][:enabled].nil? - @enable_health_model = parsedConfig[:agent_settings][:health_model][:enabled] - puts "enable_health_model = #{@enable_health_model}" - end - rescue => errorStr - puts "config::error:Exception while reading config settings for health_model enabled setting - #{errorStr}, using defaults" - @enable_health_model = false - end -end - -@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] -puts "****************Start Config Processing********************" -if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? 
&& @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it - configMapSettings = parseConfigMap - if !configMapSettings.nil? - populateSettingValuesFromConfigMap(configMapSettings) - end -else - if (File.file?(@configMapMountPath)) - ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version") - end - @enable_health_model = false -end - -# Write the settings to file, so that they can be set as environment variables -file = File.open("health_config_env_var", "w") - -if !file.nil? - file.write("export AZMON_CLUSTER_ENABLE_HEALTH_MODEL=#{@enable_health_model}\n") - # Close file after writing all environment variables - file.close -else - puts "Exception while opening file for writing config environment variables" - puts "****************End Config Processing********************" -end \ No newline at end of file diff --git a/build/version b/build/version index a8b78ecac..711a96921 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=11 +CONTAINER_BUILDVERSION_MAJOR=12 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 -CONTAINER_BUILDVERSION_BUILDNR=1 -CONTAINER_BUILDVERSION_DATE=20201109 +CONTAINER_BUILDVERSION_BUILDNR=0 +CONTAINER_BUILDVERSION_DATE=20210111 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/build/windows/installer/certificategenerator/Program.cs b/build/windows/installer/certificategenerator/Program.cs index 43063c4be..e24d0e303 100644 --- a/build/windows/installer/certificategenerator/Program.cs +++ b/build/windows/installer/certificategenerator/Program.cs @@ -414,14 +414,12 @@ static void Main(string[] args) try { - if (!String.IsNullOrEmpty(Environment.GetEnvironmentVariable("WSKEY"))) - { - logAnalyticsWorkspaceSharedKey 
= Environment.GetEnvironmentVariable("WSKEY"); - } + // WSKEY isn't stored as an environment variable + logAnalyticsWorkspaceSharedKey = File.ReadAllText("C:/etc/omsagent-secret/KEY").Trim(); } catch (Exception ex) { - Console.WriteLine("Failed to read env variables (WSKEY)" + ex.Message); + Console.WriteLine("Failed to read secret (WSKEY)" + ex.Message); } try diff --git a/build/windows/installer/conf/fluent.conf b/build/windows/installer/conf/fluent.conf index c96300b1e..d5eb475ca 100644 --- a/build/windows/installer/conf/fluent.conf +++ b/build/windows/installer/conf/fluent.conf @@ -6,7 +6,8 @@ @type tail - path /var/log/containers/*.log + path "#{ENV['AZMON_LOG_TAIL_PATH']}" + exclude_path "#{ENV['AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH']}" pos_file /var/opt/microsoft/fluent/fluentd-containers.log.pos tag oms.container.log.la @log_level trace @@ -28,6 +29,14 @@ @include fluent-docker-parser.conf + + @type grep + + key stream + pattern "#{ENV['AZMON_LOG_EXCLUSION_REGEX_PATTERN']}" + + + @type record_transformer # fluent-plugin-record-modifier more light-weight but needs to be installed (dependency worth it?) 
@@ -37,7 +46,6 @@ - @type forward send_timeout 60s diff --git a/build/windows/installer/scripts/livenessprobe.cmd b/build/windows/installer/scripts/livenessprobe.cmd index 06d577f31..19d0b69d7 100644 --- a/build/windows/installer/scripts/livenessprobe.cmd +++ b/build/windows/installer/scripts/livenessprobe.cmd @@ -1,40 +1,32 @@ -echo "Checking if fluent-bit is running" +REM "Checking if fluent-bit is running" tasklist /fi "imagename eq fluent-bit.exe" /fo "table" | findstr fluent-bit IF ERRORLEVEL 1 ( - echo "Fluent-Bit is not running" > /dev/termination-log + echo "Fluent-Bit is not running" exit /b 1 -) ELSE ( - echo "Fluent-Bit is running" ) -echo "Checking if config map has been updated since agent start" +REM "Checking if config map has been updated since agent start" IF EXIST C:\etc\omsagentwindows\filesystemwatcher.txt ( - echo "Config Map Updated since agent started" > /dev/termination-log + echo "Config Map Updated since agent started" exit /b 1 -) ELSE ( - echo "Config Map not Updated since agent start" ) -echo "Checking if certificate needs to be renewed (aka agent restart required)" +REM "Checking if certificate needs to be renewed (aka agent restart required)" IF EXIST C:\etc\omsagentwindows\renewcertificate.txt ( - echo "Certificate needs to be renewed" > /dev/termination-log + echo "Certificate needs to be renewed" exit /b 1 -) ELSE ( - echo "Certificate does NOT need to be renewd" ) -echo "Checking if fluentd service is running" +REM "Checking if fluentd service is running" sc query fluentdwinaks | findstr /i STATE | findstr RUNNING IF ERRORLEVEL 1 ( - echo "Fluentd Service is NOT Running" > /dev/termination-log + echo "Fluentd Service is NOT Running" exit /b 1 -) ELSE ( - echo "Fluentd Service is Running" ) exit /b 0 diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 987841f77..a809a4e69 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ 
-2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.9 +version: 2.8.0 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 6a309c121..81003c704 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -27,6 +27,10 @@ spec: checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} spec: + dnsConfig: + options: + - name: ndots + value: "3" {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} nodeSelector: kubernetes.io/os: windows diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index d57c4d82b..3d29ede42 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -28,6 +28,10 @@ spec: checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }} spec: + dnsConfig: + options: + - name: ndots + value: "3" {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index 4f7408e7c..bd4e9baf3 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -19,7 +19,7 @@ metadata: heritage: {{ .Release.Service }} rules: - apiGroups: [""] - resources: ["pods", 
"events", "nodes", "nodes/stats", "nodes/metrics", "nodes/spec", "nodes/proxy", "namespaces", "services"] + resources: ["pods", "events", "nodes", "nodes/stats", "nodes/metrics", "nodes/spec", "nodes/proxy", "namespaces", "services", "persistentvolumes"] verbs: ["list", "get", "watch"] - apiGroups: ["apps", "extensions", "autoscaling"] resources: ["replicasets", "deployments", "horizontalpodautoscalers"] diff --git a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml index 475b17a46..fc7c471f8 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rs-configmap.yaml @@ -18,7 +18,14 @@ data: tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth + + + #Kubernetes Persistent Volume inventory + + type kubepvinventory + tag oms.containerinsights.KubePVInventory + run_interval 60 + log_level debug #Kubernetes events @@ -70,14 +77,12 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info # custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions 
eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes log_level info @@ -90,7 +95,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubepods*.buffer @@ -102,12 +107,27 @@ data: max_retry_wait 5m - + type out_oms log_level debug num_threads 5 buffer_chunk_limit 4m buffer_type file + buffer_path %STATE_DIR_WS%/state/out_oms_kubepv*.buffer + buffer_queue_limit 20 + buffer_queue_full_action drop_oldest_chunk + flush_interval 20s + retry_limit 10 + retry_wait 5s + max_retry_wait 5m + + + + type out_oms + log_level debug + num_threads 2 + buffer_chunk_limit 4m + buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubeevents*.buffer buffer_queue_limit 20 buffer_queue_full_action drop_oldest_chunk @@ -135,7 +155,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/state/out_oms_kubenodes*.buffer @@ -164,7 +184,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubeperf*.buffer diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 76ea0a26d..52e80dff8 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,10 +12,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod11092020" - tagWindows: "win-ciprod11092020" + tag: "ciprod01112021" + tagWindows: "win-ciprod01112021" pullPolicy: IfNotPresent - dockerProviderVersion: 
"11.0.0-1" + dockerProviderVersion: "12.0.0-0" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. @@ -56,6 +56,21 @@ omsagent: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - labelSelector: + matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/arch + operator: In + values: + - amd64 nodeSelectorTerms: - labelSelector: matchExpressions: @@ -78,9 +93,22 @@ omsagent: operator: NotIn values: - virtual-kubelet + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 deployment: affinity: nodeAffinity: + # affinity to schedule on to ephemeral os node if its available + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: storageprofile + operator: NotIn + values: + - managed requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - labelSelector: @@ -97,6 +125,10 @@ omsagent: operator: NotIn values: - master + - key: kubernetes.io/arch + operator: In + values: + - amd64 nodeSelectorTerms: - labelSelector: matchExpressions: @@ -112,6 +144,10 @@ omsagent: operator: NotIn values: - master + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## @@ -133,4 +169,4 @@ omsagent: memory: 250Mi limits: cpu: 1 - memory: 750Mi + memory: 1Gi diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index d04e86128..2e1118922 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG 
IMAGE_TAG=ciprod11092020 +ARG IMAGE_TAG=ciprod01112021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi @@ -15,6 +15,7 @@ ENV HOST_VAR /hostfs/var ENV AZMON_COLLECT_ENV False ENV KUBE_CLIENT_BACKOFF_BASE 1 ENV KUBE_CLIENT_BACKOFF_DURATION 0 +ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs mdsd.xml envmdsd $tmpdir/ WORKDIR ${tmpdir} diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index b093eb74b..2f88c07ac 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -150,6 +150,17 @@ else echo "LA Onboarding:Workspace Id not mounted, skipping the telemetry check" fi +# Set environment variable for if public cloud by checking the workspace domain. +if [ -z $domain ]; then + CLOUD_ENVIRONMENT="unknown" +elif [ $domain == "opinsights.azure.com" ]; then + CLOUD_ENVIRONMENT="public" +else + CLOUD_ENVIRONMENT="national" +fi +export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT +echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc + #Parse the configmap to set the right environment variables. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb @@ -160,14 +171,24 @@ done source config_env_var -#Parse the configmap to set the right environment variables for health feature. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-health-config.rb
+#Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings +/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb + +cat agent_config_env_var | while read line; do + #echo $line + echo $line >> ~/.bashrc +done +source agent_config_env_var + +#Parse the configmap to set the right environment variables for network policy manager (npm) integration. +/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb -cat health_config_env_var | while read line; do +cat integration_npm_config_env_var | while read line; do #echo $line echo $line >> ~/.bashrc done -source health_config_env_var +source integration_npm_config_env_var #Parse the configmap to set the right environment variables for network policy manager (npm) integration. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb @@ -418,7 +439,7 @@ echo "export DOCKER_CIMPROV_VERSION=$DOCKER_CIMPROV_VERSION" >> ~/.bashrc #region check to auto-activate oneagent, to route container logs, #Intent is to activate one agent routing for all managed clusters with region in the regionllist, unless overridden by configmap -# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map +# AZMON_CONTAINER_LOGS_ROUTE will have route (if any) specified in the config map # AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE will have the final route that we compute & set, based on our region list logic echo "************start oneagent log routing checks************" # by default, use configmap route for safer side @@ -451,9 +472,9 @@ else echo "current region is not in oneagent regions..." fi -if [ "$isoneagentregion" = true ]; then +if [ "$isoneagentregion" = true ]; then #if configmap has a routing for logs, but current region is in the oneagent region list, take the configmap route - if [ ! -z $AZMON_CONTAINER_LOGS_ROUTE ]; then + if [ ! 
-z $AZMON_CONTAINER_LOGS_ROUTE ]; then AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE=$AZMON_CONTAINER_LOGS_ROUTE echo "oneagent region is true for current region:$currentregion and config map logs route is not empty. so using config map logs route as effective route:$AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" else #there is no configmap route, so route thru oneagent @@ -500,7 +521,6 @@ if [ ! -e "/etc/config/kube.conf" ]; then echo "starting mdsd ..." mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & - touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 fi fi diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index fb41d4782..352be06d7 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -71,7 +71,7 @@ chmod 777 /opt/telegraf wget -qO - https://packages.fluentbit.io/fluentbit.key | sudo apt-key add - sudo echo "deb https://packages.fluentbit.io/ubuntu/xenial xenial main" >> /etc/apt/sources.list sudo apt-get update -sudo apt-get install td-agent-bit=1.4.2 -y +sudo apt-get install td-agent-bit=1.6.8 -y rm -rf $TMPDIR/omsbundle rm -f $TMPDIR/omsagent*.sh diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 7d07eafcd..67bd9cdde 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -21,6 +21,7 @@ rules: "nodes/proxy", "namespaces", "services", + "persistentvolumes" ] verbs: ["list", "get", "watch"] - apiGroups: ["apps", "extensions", "autoscaling"] @@ -64,7 +65,14 @@ data: tag oms.containerinsights.KubePodInventory run_interval 60 log_level debug - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth + + + #Kubernetes Persistent Volume inventory + + type 
kubepvinventory + tag oms.containerinsights.KubePVInventory + run_interval 60 + log_level debug #Kubernetes events @@ -117,14 +125,12 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth log_level info #custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral,francecentral,japaneast,australiaeast,eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,pvUsedBytes log_level info @@ -137,7 +143,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubepods*.buffer @@ -149,10 +155,25 @@ data: max_retry_wait 5m + + type out_oms + log_level debug + num_threads 5 + buffer_chunk_limit 4m + buffer_type file + buffer_path %STATE_DIR_WS%/state/out_oms_kubepv*.buffer + buffer_queue_limit 20 + buffer_queue_full_action drop_oldest_chunk + flush_interval 20s + retry_limit 10 + retry_wait 5s + max_retry_wait 5m + + type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubeevents*.buffer @@ -182,7 +203,7 @@ data: type out_oms log_level debug - num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/state/out_oms_kubenodes*.buffer @@ -211,7 +232,7 @@ data: type out_oms log_level debug - 
num_threads 5 + num_threads 2 buffer_chunk_limit 4m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_kubeperf*.buffer @@ -337,17 +358,21 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-1" + dockerProviderVersion: "12.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent + dnsConfig: + options: + - name: ndots + value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021" imagePullPolicy: IfNotPresent resources: limits: - cpu: 250m + cpu: 500m memory: 600Mi requests: cpu: 75m @@ -496,23 +521,22 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-1" + dockerProviderVersion: "12.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021" imagePullPolicy: IfNotPresent resources: limits: cpu: 1 - memory: 750Mi + memory: 1Gi requests: cpu: 150m memory: 250Mi env: - # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these - name: AKS_RESOURCE_ID value: "VALUE_AKS_RESOURCE_ID_VALUE" - name: AKS_REGION @@ -567,6 +591,15 @@ spec: periodSeconds: 60 affinity: nodeAffinity: + # affinity to schedule on to ephemeral os node if its available + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + preference: + matchExpressions: + - key: storageprofile + operator: NotIn + values: + - managed requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - labelSelector: @@ -642,13 +675,17 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "11.0.0-1" + dockerProviderVersion: "12.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent + dnsConfig: + 
options: + - name: ndots + value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index 10ea235b2..f852bd236 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod11092020 +ARG IMAGE_TAG=win-ciprod01112021 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 2e8659601..a297e3801 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -43,34 +43,32 @@ function Start-FileSystemWatcher { function Set-EnvironmentVariables { $domain = "opinsights.azure.com" + $cloud_environment = "public" if (Test-Path /etc/omsagent-secret/DOMAIN) { # TODO: Change to omsagent-secret before merging $domain = Get-Content /etc/omsagent-secret/DOMAIN + $cloud_environment = "national" } # Set DOMAIN [System.Environment]::SetEnvironmentVariable("DOMAIN", $domain, "Process") [System.Environment]::SetEnvironmentVariable("DOMAIN", $domain, "Machine") + # Set CLOUD_ENVIRONMENT + [System.Environment]::SetEnvironmentVariable("CLOUD_ENVIRONMENT", $cloud_environment, "Process") + [System.Environment]::SetEnvironmentVariable("CLOUD_ENVIRONMENT", $cloud_environment, "Machine") + $wsID = "" if (Test-Path /etc/omsagent-secret/WSID) { # TODO: Change to omsagent-secret before merging $wsID = Get-Content /etc/omsagent-secret/WSID } - # Set DOMAIN + # Set WSID [System.Environment]::SetEnvironmentVariable("WSID", $wsID, "Process") [System.Environment]::SetEnvironmentVariable("WSID", $wsID, "Machine") - $wsKey = 
"" - if (Test-Path /etc/omsagent-secret/KEY) { - # TODO: Change to omsagent-secret before merging - $wsKey = Get-Content /etc/omsagent-secret/KEY - } - - # Set KEY - [System.Environment]::SetEnvironmentVariable("WSKEY", $wsKey, "Process") - [System.Environment]::SetEnvironmentVariable("WSKEY", $wsKey, "Machine") + # Don't store WSKEY as environment variable $proxy = "" if (Test-Path /etc/omsagent-secret/PROXY) { diff --git a/scripts/onboarding/managed/disable-monitoring.ps1 b/scripts/onboarding/managed/disable-monitoring.ps1 index 1c011bfff..bcd135dba 100644 --- a/scripts/onboarding/managed/disable-monitoring.ps1 +++ b/scripts/onboarding/managed/disable-monitoring.ps1 @@ -15,6 +15,8 @@ tenantId of the service principal which will be used for the azure login .PARAMETER kubeContext (optional) kube-context of the k8 cluster to install Azure Monitor for containers HELM chart + .PARAMETER azureCloudName (optional) + Name of the Azure cloud name. Supported Azure cloud Name is AzureCloud or AzureUSGovernment Pre-requisites: - Azure Managed cluster Resource Id @@ -34,7 +36,9 @@ param( [Parameter(mandatory = $false)] [string]$tenantId, [Parameter(mandatory = $false)] - [string]$kubeContext + [string]$kubeContext, + [Parameter(mandatory = $false)] + [string]$azureCloudName ) $helmChartReleaseName = "azmon-containers-release-1" @@ -46,6 +50,21 @@ $isAksCluster = $false $isAroV4Cluster = $false $isUsingServicePrincipal = $false +if ([string]::IsNullOrEmpty($azureCloudName) -eq $true) { + Write-Host("Azure cloud name parameter not passed in so using default cloud as AzureCloud") + $azureCloudName = "AzureCloud" +} else { + if(($azureCloudName.ToLower() -eq "azurecloud" ) -eq $true) { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + } elseif (($azureCloudName.ToLower() -eq "azureusgovernment" ) -eq $true) { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + } else { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + Write-Host("Only 
supported Azure clouds are : AzureCloud and AzureUSGovernment") + exit + } +} + # checks the required Powershell modules exist and if not exists, request the user permission to install $azAccountModule = Get-Module -ListAvailable -Name Az.Accounts $azResourcesModule = Get-Module -ListAvailable -Name Az.Resources @@ -226,14 +245,19 @@ Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -Foregr if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret - Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId -Environment $azureCloudName } try { Write-Host("") Write-Host("Trying to get the current Az login context...") $account = Get-AzContext -ErrorAction Stop - Write-Host("Successfully fetched current AzContext context...") -ForegroundColor Green + $ctxCloud = $account.Environment.Name + if(($azureCloudName.ToLower() -eq $ctxCloud.ToLower() ) -eq $false) { + Write-Host("Specified azure cloud name is not same as current context cloud hence setting account to null to retrigger the login" ) -ForegroundColor Green + $account = $null + } + Write-Host("Successfully fetched current AzContext context and azure cloud name: $azureCloudName" ) -ForegroundColor Green Write-Host("") } catch { @@ -249,10 +273,10 @@ if ($null -eq $account.Account) { if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret - Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription 
$clusterSubscriptionId + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId -Environment $azureCloudName } else { Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId + Connect-AzAccount -subscriptionid $clusterSubscriptionId -Environment $azureCloudName } } catch { diff --git a/scripts/onboarding/managed/disable-monitoring.sh b/scripts/onboarding/managed/disable-monitoring.sh index c11426f30..d43a79f51 100644 --- a/scripts/onboarding/managed/disable-monitoring.sh +++ b/scripts/onboarding/managed/disable-monitoring.sh @@ -280,10 +280,27 @@ done } +validate_and_configure_supported_cloud() { + echo "get active azure cloud name configured to azure cli" + azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]") + echo "active azure cloud name configured to azure cli: ${azureCloudName}" + if [ "$isArcK8sCluster" = true ]; then + if [ "$azureCloudName" != "azurecloud" -a "$azureCloudName" != "azureusgovernment" ]; then + echo "-e only supported clouds are AzureCloud and AzureUSGovernment for Azure Arc enabled Kubernetes cluster type" + exit 1 + fi + else + # For ARO v4, only supported cloud is public so just configure to public to keep the existing behavior + configure_to_public_cloud + fi +} # parse args parse_args $@ +# validate and configure azure cloud +validate_and_configure_supported_cloud + # parse cluster resource id clusterSubscriptionId="$(echo $clusterResourceId | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" clusterResourceGroup="$(echo $clusterResourceId | cut -d'/' -f5)" diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index b052f22c5..5e9cb8a25 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -22,6 +22,8 @@ .PARAMETER proxyEndpoint (optional) Provide Proxy endpoint if you have K8s cluster behind the proxy 
and would like to route Azure Monitor for containers outbound traffic via proxy. Format of the proxy endpoint should be http(s://:@: + .PARAMETER azureCloudName (optional) + Name of the Azure cloud name. Supported Azure cloud Name is AzureCloud or AzureUSGovernment Pre-requisites: - Azure Managed cluster Resource Id @@ -44,9 +46,13 @@ param( [Parameter(mandatory = $false)] [string]$kubeContext, [Parameter(mandatory = $false)] - [string]$workspaceResourceId, + [string]$servicePrincipalClientSecret, + [Parameter(mandatory = $false)] + [string]$tenantId, + [Parameter(mandatory = $false)] + [string]$kubeContext, [Parameter(mandatory = $false)] - [string]$proxyEndpoint + [string]$azureCloudName ) $solutionTemplateUri = "https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/templates/azuremonitor-containerSolution.json" @@ -60,9 +66,27 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.7.9" +$mcrChartVersion = "2.8.0" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." 
+$omsAgentDomainName="opinsights.azure.com" + +if ([string]::IsNullOrEmpty($azureCloudName) -eq $true) { + Write-Host("Azure cloud name parameter not passed in so using default cloud as AzureCloud") + $azureCloudName = "AzureCloud" +} else { + if(($azureCloudName.ToLower() -eq "azurecloud" ) -eq $true) { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + $omsAgentDomainName="opinsights.azure.com" + } elseif (($azureCloudName.ToLower() -eq "azureusgovernment" ) -eq $true) { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + $omsAgentDomainName="opinsights.azure.us" + } else { + Write-Host("Specified Azure Cloud name is : $azureCloudName") + Write-Host("Only supported azure clouds are : AzureCloud and AzureUSGovernment") + exit + } +} # checks the required Powershell modules exist and if not exists, request the user permission to install $azAccountModule = Get-Module -ListAvailable -Name Az.Accounts @@ -244,14 +268,19 @@ Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -Foregr if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId, $spSecret - Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId -Environment $azureCloudName } try { Write-Host("") Write-Host("Trying to get the current Az login context...") $account = Get-AzContext -ErrorAction Stop - Write-Host("Successfully fetched current AzContext context...") -ForegroundColor Green + $ctxCloud = $account.Environment.Name + if(($azureCloudName.ToLower() -eq $ctxCloud.ToLower() ) -eq $false) { + Write-Host("Specified azure cloud name is not same as current context cloud hence setting account to null to 
retrigger the login" ) -ForegroundColor Green + $account = $null + } + Write-Host("Successfully fetched current AzContext context and azure cloud name: $azureCloudName" ) -ForegroundColor Green Write-Host("") } catch { @@ -266,11 +295,12 @@ if ($null -eq $account.Account) { if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId, $spSecret - Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId + + Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId -Environment $azureCloudName } else { Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId + Connect-AzAccount -subscriptionid $clusterSubscriptionId -Environment $azureCloudName } } catch { @@ -380,7 +410,8 @@ if ([string]::IsNullOrEmpty($workspaceResourceId)) { "westeurope" = "westeurope" ; "westindia" = "centralindia" ; "westus" = "westus" ; - "westus2" = "westus2" + "westus2" = "westus2"; + "usgovvirginia" = "usgovvirginia" } $workspaceRegionCode = "EUS" @@ -531,7 +562,7 @@ try { Write-Host("helmChartRepoPath is : ${helmChartRepoPath}") - $helmParameters = "omsagent.secret.wsid=$workspaceGUID,omsagent.secret.key=$workspacePrimarySharedKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion" + $helmParameters = "omsagent.domain=$omsAgentDomainName,omsagent.secret.wsid=$workspaceGUID,omsagent.secret.key=$workspacePrimarySharedKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion" if ([string]::IsNullOrEmpty($proxyEndpoint) -eq $false) { Write-Host("using proxy endpoint since its provided") $helmParameters = $helmParameters + ",omsagent.proxy=$proxyEndpoint" diff --git a/scripts/onboarding/managed/enable-monitoring.sh 
b/scripts/onboarding/managed/enable-monitoring.sh index bb6974258..2dc0a465f 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -38,11 +38,13 @@ set -e set -o pipefail -# default to public cloud since only supported cloud is azure public clod +# default to public cloud since only supported cloud is azure public cloud defaultAzureCloud="AzureCloud" +# default domain will be for public cloud +omsAgentDomainName="opinsights.azure.com" # released chart version in mcr -mcrChartVersion="2.7.9" +mcrChartVersion="2.8.0" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." @@ -307,6 +309,25 @@ parse_args() { } +validate_and_configure_supported_cloud() { + echo "get active azure cloud name configured to azure cli" + azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]") + echo "active azure cloud name configured to azure cli: ${azureCloudName}" + if [ "$isArcK8sCluster" = true ]; then + if [ "$azureCloudName" != "azurecloud" -a "$azureCloudName" != "azureusgovernment" ]; then + echo "-e only supported clouds are AzureCloud and AzureUSGovernment for Azure Arc enabled Kubernetes cluster type" + exit 1 + fi + if [ "$azureCloudName" = "azureusgovernment" ]; then + echo "setting omsagent domain as opinsights.azure.us since the azure cloud is azureusgovernment " + omsAgentDomainName="opinsights.azure.us" + fi + else + # For ARO v4, only supported cloud is public so just configure to public to keep the existing behavior + configure_to_public_cloud + fi +} + configure_to_public_cloud() { echo "Set AzureCloud as active cloud for az cli" az cloud set -n $defaultAzureCloud @@ -398,8 +419,10 @@ create_default_log_analytics_workspace() { [westindia]=centralindia [westus]=westus [westus2]=westus2 + [usgovvirginia]=usgovvirginia ) + echo "cluster Region:"$clusterRegion if [ -n 
"${AzureCloudRegionToOmsRegionMap[$clusterRegion]}" ]; then workspaceRegion=${AzureCloudRegionToOmsRegionMap[$clusterRegion]} fi @@ -433,6 +456,7 @@ create_default_log_analytics_workspace() { workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id) workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') + echo "workspace resource Id: ${workspaceResourceId}" } add_container_insights_solution() { @@ -504,18 +528,18 @@ install_helm_chart() { echo "using proxy endpoint since proxy configuration passed in" if [ -z "$kubeconfigContext" ]; then echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install $releaseName --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath + helm upgrade --install $releaseName --set omsagent.domain=$omsAgentDomainName,omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath else echo "using --kube-context:${kubeconfigContext} since passed in" - helm upgrade --install $releaseName --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} + helm upgrade --install $releaseName --set omsagent.domain=$omsAgentDomainName,omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} fi else if [ -z "$kubeconfigContext" ]; then echo "using current kube-context since 
--kube-context/-k parameter not passed in" - helm upgrade --install $releaseName --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath + helm upgrade --install $releaseName --set omsagent.domain=$omsAgentDomainName,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath else echo "using --kube-context:${kubeconfigContext} since passed in" - helm upgrade --install $releaseName --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} + helm upgrade --install $releaseName --set omsagent.domain=$omsAgentDomainName,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} fi fi @@ -560,8 +584,8 @@ enable_aks_monitoring_addon() { # parse and validate args parse_args $@ -# configure azure cli for public cloud -configure_to_public_cloud +# validate and configure azure cli for cloud +validate_and_configure_supported_cloud # parse cluster resource id clusterSubscriptionId="$(echo $clusterResourceId | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 11ecf6819..8826b6df6 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.7.9" +mcrChartVersion="2.8.0" mcr="mcr.microsoft.com" 
mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" @@ -281,11 +281,26 @@ set_azure_subscription() { echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" } +validate_and_configure_supported_cloud() { + echo "get active azure cloud name configured to azure cli" + azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]") + echo "active azure cloud name configured to azure cli: ${azureCloudName}" + if [ "$isArcK8sCluster" = true ]; then + if [ "$azureCloudName" != "azurecloud" -a "$azureCloudName" != "azureusgovernment" ]; then + echo "-e only supported clouds are AzureCloud and AzureUSGovernment for Azure Arc enabled Kubernetes cluster type" + exit 1 + fi + else + # For ARO v4, only supported cloud is public so just configure to public to keep the existing behavior + configure_to_public_cloud + fi +} + # parse and validate args parse_args $@ -# configure azure cli for public cloud -configure_to_public_cloud +# configure azure cli for cloud +validate_and_configure_supported_cloud # parse cluster resource id clusterSubscriptionId="$(echo $clusterResourceId | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" diff --git a/scripts/preview/health/omsagent-template-aks-engine.yaml b/scripts/preview/health/omsagent-template-aks-engine.yaml index 5526602c0..5e063fd54 100644 --- a/scripts/preview/health/omsagent-template-aks-engine.yaml +++ b/scripts/preview/health/omsagent-template-aks-engine.yaml @@ -108,14 +108,12 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westEurope log_level info # custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westEurope metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes log_level info diff --git 
a/scripts/preview/health/omsagent-template.yaml b/scripts/preview/health/omsagent-template.yaml index 6e3a52020..e58e9c33f 100644 --- a/scripts/preview/health/omsagent-template.yaml +++ b/scripts/preview/health/omsagent-template.yaml @@ -108,14 +108,12 @@ data: type filter_inventory2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westEurope log_level info # custom_metrics_mdm filter plugin for perf data from windows nodes type filter_cadvisor2mdm - custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westEurope metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes log_level info diff --git a/source/plugins/ruby/CustomMetricsUtils.rb b/source/plugins/ruby/CustomMetricsUtils.rb index a19580630..220313e6b 100644 --- a/source/plugins/ruby/CustomMetricsUtils.rb +++ b/source/plugins/ruby/CustomMetricsUtils.rb @@ -6,21 +6,15 @@ def initialize end class << self - def check_custom_metrics_availability(custom_metric_regions) + def check_custom_metrics_availability aks_region = ENV['AKS_REGION'] aks_resource_id = ENV['AKS_RESOURCE_ID'] + aks_cloud_environment = ENV['CLOUD_ENVIRONMENT'] if aks_region.to_s.empty? || aks_resource_id.to_s.empty? return false # This will also take care of AKS-Engine Scenario. AKS_REGION/AKS_RESOURCE_ID is not set for AKS-Engine. 
Only ACS_RESOURCE_NAME is set end - custom_metrics_regions_arr = custom_metric_regions.split(',') - custom_metrics_regions_hash = custom_metrics_regions_arr.map {|x| [x.downcase,true]}.to_h - - if custom_metrics_regions_hash.key?(aks_region.downcase) - true - else - false - end + return aks_cloud_environment.to_s.downcase == 'public' end end end \ No newline at end of file diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index 073eb0417..aca2142a0 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -172,6 +172,10 @@ def isAROV3Cluster return @@IsAROV3Cluster end + def isAROv3MasterOrInfraPod(nodeName) + return isAROV3Cluster() && (!nodeName.nil? && (nodeName.downcase.start_with?("infra-") || nodeName.downcase.start_with?("master-"))) + end + def isNodeMaster return @@IsNodeMaster if !@@IsNodeMaster.nil? @@IsNodeMaster = false @@ -276,7 +280,8 @@ def getPods(namespace) def getWindowsNodes winNodes = [] begin - resourceUri = getNodesResourceUri("nodes") + # get only windows nodes + resourceUri = getNodesResourceUri("nodes?labelSelector=kubernetes.io%2Fos%3Dwindows") nodeInventory = JSON.parse(getKubeResourceInfo(resourceUri).body) @Log.info "KubernetesAPIClient::getWindowsNodes : Got nodes from kube api" # Resetting the windows node cache @@ -396,42 +401,67 @@ def getPodUid(podNameSpace, podMetadata) return podUid end - def getContainerResourceRequestsAndLimits(metricJSON, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) + def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) metricItems = [] begin clusterId = getClusterId - metricInfo = metricJSON - metricInfo["items"].each do |pod| - podNameSpace = pod["metadata"]["namespace"] - podUid = getPodUid(podNameSpace, pod["metadata"]) - if podUid.nil? 
- next - end - - # For ARO, skip the pods scheduled on to master or infra nodes to ingest - if isAROV3Cluster() && !pod["spec"].nil? && !pod["spec"]["nodeName"].nil? && - (pod["spec"]["nodeName"].downcase.start_with?("infra-") || - pod["spec"]["nodeName"].downcase.start_with?("master-")) - next - end + podNameSpace = pod["metadata"]["namespace"] + podUid = getPodUid(podNameSpace, pod["metadata"]) + if podUid.nil? + return metricItems + end - podContainers = [] - if !pod["spec"]["containers"].nil? && !pod["spec"]["containers"].empty? - podContainers = podContainers + pod["spec"]["containers"] - end - # Adding init containers to the record list as well. - if !pod["spec"]["initContainers"].nil? && !pod["spec"]["initContainers"].empty? - podContainers = podContainers + pod["spec"]["initContainers"] - end + nodeName = "" + #for unscheduled (non-started) pods nodeName does NOT exist + if !pod["spec"]["nodeName"].nil? + nodeName = pod["spec"]["nodeName"] + end + # For ARO, skip the pods scheduled on to master or infra nodes to ingest + if isAROv3MasterOrInfraPod(nodeName) + return metricItems + end - if (!podContainers.nil? && !podContainers.empty? && !pod["spec"]["nodeName"].nil?) - nodeName = pod["spec"]["nodeName"] - podContainers.each do |container| - containerName = container["name"] - #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z - if (!container["resources"].nil? && !container["resources"].empty? && !container["resources"][metricCategory].nil? && !container["resources"][metricCategory][metricNameToCollect].nil?) - metricValue = getMetricNumericValue(metricNameToCollect, container["resources"][metricCategory][metricNameToCollect]) + podContainers = [] + if !pod["spec"]["containers"].nil? && !pod["spec"]["containers"].empty? + podContainers = podContainers + pod["spec"]["containers"] + end + # Adding init containers to the record list as well. + if !pod["spec"]["initContainers"].nil? && !pod["spec"]["initContainers"].empty? 
+ podContainers = podContainers + pod["spec"]["initContainers"] + end + if (!podContainers.nil? && !podContainers.empty? && !pod["spec"]["nodeName"].nil?) + podContainers.each do |container| + containerName = container["name"] + #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z + if (!container["resources"].nil? && !container["resources"].empty? && !container["resources"][metricCategory].nil? && !container["resources"][metricCategory][metricNameToCollect].nil?) + metricValue = getMetricNumericValue(metricNameToCollect, container["resources"][metricCategory][metricNameToCollect]) + + metricItem = {} + metricItem["DataItems"] = [] + + metricProps = {} + metricProps["Timestamp"] = metricTime + metricProps["Host"] = nodeName + # Adding this so that it is not set by base omsagent since it was not set earlier and being set by base omsagent + metricProps["Computer"] = nodeName + metricProps["ObjectName"] = "K8SContainer" + metricProps["InstanceName"] = clusterId + "/" + podUid + "/" + containerName + + metricProps["Collections"] = [] + metricCollections = {} + metricCollections["CounterName"] = metricNametoReturn + metricCollections["Value"] = metricValue + + metricProps["Collections"].push(metricCollections) + metricItem["DataItems"].push(metricProps) + metricItems.push(metricItem) + #No container level limit for the given metric, so default to node level limit + else + nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect + if (metricCategory == "limits" && @@NodeMetrics.has_key?(nodeMetricsHashKey)) + metricValue = @@NodeMetrics[nodeMetricsHashKey] + #@Log.info("Limits not set for container #{clusterId + "/" + podUid + "/" + containerName} using node level limits: #{nodeMetricsHashKey}=#{metricValue} ") metricItem = {} metricItem["DataItems"] = [] @@ -451,32 +481,6 @@ def getContainerResourceRequestsAndLimits(metricJSON, metricCategory, metricName metricProps["Collections"].push(metricCollections) 
metricItem["DataItems"].push(metricProps) metricItems.push(metricItem) - #No container level limit for the given metric, so default to node level limit - else - nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect - if (metricCategory == "limits" && @@NodeMetrics.has_key?(nodeMetricsHashKey)) - metricValue = @@NodeMetrics[nodeMetricsHashKey] - #@Log.info("Limits not set for container #{clusterId + "/" + podUid + "/" + containerName} using node level limits: #{nodeMetricsHashKey}=#{metricValue} ") - metricItem = {} - metricItem["DataItems"] = [] - - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = nodeName - # Adding this so that it is not set by base omsagent since it was not set earlier and being set by base omsagent - metricProps["Computer"] = nodeName - metricProps["ObjectName"] = "K8SContainer" - metricProps["InstanceName"] = clusterId + "/" + podUid + "/" + containerName - - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) - metricItems.push(metricItem) - end end end end @@ -488,78 +492,74 @@ def getContainerResourceRequestsAndLimits(metricJSON, metricCategory, metricName return metricItems end #getContainerResourceRequestAndLimits - def getContainerResourceRequestsAndLimitsAsInsightsMetrics(metricJSON, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) + def getContainerResourceRequestsAndLimitsAsInsightsMetrics(pod, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) metricItems = [] begin clusterId = getClusterId clusterName = getClusterName - - metricInfo = metricJSON - metricInfo["items"].each do |pod| - podNameSpace = pod["metadata"]["namespace"] - if podNameSpace.eql?("kube-system") && 
!pod["metadata"].key?("ownerReferences") - # The above case seems to be the only case where you have horizontal scaling of pods - # but no controller, in which case cAdvisor picks up kubernetes.io/config.hash - # instead of the actual poduid. Since this uid is not being surface into the UX - # its ok to use this. - # Use kubernetes.io/config.hash to be able to correlate with cadvisor data - if pod["metadata"]["annotations"].nil? - next - else - podUid = pod["metadata"]["annotations"]["kubernetes.io/config.hash"] - end + podNameSpace = pod["metadata"]["namespace"] + if podNameSpace.eql?("kube-system") && !pod["metadata"].key?("ownerReferences") + # The above case seems to be the only case where you have horizontal scaling of pods + # but no controller, in which case cAdvisor picks up kubernetes.io/config.hash + # instead of the actual poduid. Since this uid is not being surface into the UX + # its ok to use this. + # Use kubernetes.io/config.hash to be able to correlate with cadvisor data + if pod["metadata"]["annotations"].nil? + return metricItems else - podUid = pod["metadata"]["uid"] + podUid = pod["metadata"]["annotations"]["kubernetes.io/config.hash"] end + else + podUid = pod["metadata"]["uid"] + end - podContainers = [] - if !pod["spec"]["containers"].nil? && !pod["spec"]["containers"].empty? - podContainers = podContainers + pod["spec"]["containers"] - end - # Adding init containers to the record list as well. - if !pod["spec"]["initContainers"].nil? && !pod["spec"]["initContainers"].empty? - podContainers = podContainers + pod["spec"]["initContainers"] - end + podContainers = [] + if !pod["spec"]["containers"].nil? && !pod["spec"]["containers"].empty? + podContainers = podContainers + pod["spec"]["containers"] + end + # Adding init containers to the record list as well. + if !pod["spec"]["initContainers"].nil? && !pod["spec"]["initContainers"].empty? + podContainers = podContainers + pod["spec"]["initContainers"] + end - if (!podContainers.nil? 
&& !podContainers.empty?) - if (!pod["spec"]["nodeName"].nil?) - nodeName = pod["spec"]["nodeName"] + if (!podContainers.nil? && !podContainers.empty?) + if (!pod["spec"]["nodeName"].nil?) + nodeName = pod["spec"]["nodeName"] + else + nodeName = "" #unscheduled pod. We still want to collect limits & requests for GPU + end + podContainers.each do |container| + metricValue = nil + containerName = container["name"] + #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z + if (!container["resources"].nil? && !container["resources"].empty? && !container["resources"][metricCategory].nil? && !container["resources"][metricCategory][metricNameToCollect].nil?) + metricValue = getMetricNumericValue(metricNameToCollect, container["resources"][metricCategory][metricNameToCollect]) else - nodeName = "" #unscheduled pod. We still want to collect limits & requests for GPU - end - podContainers.each do |container| - metricValue = nil - containerName = container["name"] - #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z - if (!container["resources"].nil? && !container["resources"].empty? && !container["resources"][metricCategory].nil? && !container["resources"][metricCategory][metricNameToCollect].nil?) - metricValue = getMetricNumericValue(metricNameToCollect, container["resources"][metricCategory][metricNameToCollect]) - else - #No container level limit for the given metric, so default to node level limit for non-gpu metrics - if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") - nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect - metricValue = @@NodeMetrics[nodeMetricsHashKey] - end - end - if (!metricValue.nil?) 
- metricItem = {} - metricItem["CollectionTime"] = metricTime - metricItem["Computer"] = nodeName - metricItem["Name"] = metricNametoReturn - metricItem["Value"] = metricValue - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN - metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE - - metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName - metricTags[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] = podUid + "/" + containerName - #metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = podNameSpace - - metricItem["Tags"] = metricTags - - metricItems.push(metricItem) + #No container level limit for the given metric, so default to node level limit for non-gpu metrics + if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") + nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect + metricValue = @@NodeMetrics[nodeMetricsHashKey] end end + if (!metricValue.nil?) 
+ metricItem = {} + metricItem["CollectionTime"] = metricTime + metricItem["Computer"] = nodeName + metricItem["Name"] = metricNametoReturn + metricItem["Value"] = metricValue + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE + + metricTags = {} + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName + metricTags[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] = podUid + "/" + containerName + #metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = podNameSpace + + metricItem["Tags"] = metricTags + + metricItems.push(metricItem) + end end end rescue => error @@ -578,32 +578,9 @@ def parseNodeLimits(metricJSON, metricCategory, metricNameToCollect, metricNamet #if we are coming up with the time it should be same for all nodes #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z metricInfo["items"].each do |node| - if (!node["status"][metricCategory].nil?) 
- - # metricCategory can be "capacity" or "allocatable" and metricNameToCollect can be "cpu" or "memory" - metricValue = getMetricNumericValue(metricNameToCollect, node["status"][metricCategory][metricNameToCollect]) - - metricItem = {} - metricItem["DataItems"] = [] - metricProps = {} - metricProps["Timestamp"] = metricTime - metricProps["Host"] = node["metadata"]["name"] - # Adding this so that it is not set by base omsagent since it was not set earlier and being set by base omsagent - metricProps["Computer"] = node["metadata"]["name"] - metricProps["ObjectName"] = "K8SNode" - metricProps["InstanceName"] = clusterId + "/" + node["metadata"]["name"] - metricProps["Collections"] = [] - metricCollections = {} - metricCollections["CounterName"] = metricNametoReturn - metricCollections["Value"] = metricValue - - metricProps["Collections"].push(metricCollections) - metricItem["DataItems"].push(metricProps) + metricItem = parseNodeLimitsFromNodeItem(node, metricCategory, metricNameToCollect, metricNametoReturn, metricTime) + if !metricItem.nil? && !metricItem.empty? metricItems.push(metricItem) - #push node level metrics to a inmem hash so that we can use it looking up at container level. 
- #Currently if container level cpu & memory limits are not defined we default to node level limits - @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue - #@Log.info ("Node metric hash: #{@@NodeMetrics}") end end rescue => error @@ -612,49 +589,82 @@ def parseNodeLimits(metricJSON, metricCategory, metricNameToCollect, metricNamet return metricItems end #parseNodeLimits - def parseNodeLimitsAsInsightsMetrics(metricJSON, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) - metricItems = [] + def parseNodeLimitsFromNodeItem(node, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) + metricItem = {} begin - metricInfo = metricJSON clusterId = getClusterId - clusterName = getClusterName #Since we are getting all node data at the same time and kubernetes doesnt specify a timestamp for the capacity and allocation metrics, #if we are coming up with the time it should be same for all nodes #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z - metricInfo["items"].each do |node| - if (!node["status"][metricCategory].nil?) && (!node["status"][metricCategory][metricNameToCollect].nil?) 
- - # metricCategory can be "capacity" or "allocatable" and metricNameToCollect can be "cpu" or "memory" or "amd.com/gpu" or "nvidia.com/gpu" - metricValue = getMetricNumericValue(metricNameToCollect, node["status"][metricCategory][metricNameToCollect]) - - metricItem = {} - metricItem["CollectionTime"] = metricTime - metricItem["Computer"] = node["metadata"]["name"] - metricItem["Name"] = metricNametoReturn - metricItem["Value"] = metricValue - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN - metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE - - metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName - metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_VENDOR] = metricNameToCollect - - metricItem["Tags"] = metricTags + if (!node["status"][metricCategory].nil?) && (!node["status"][metricCategory][metricNameToCollect].nil?) + # metricCategory can be "capacity" or "allocatable" and metricNameToCollect can be "cpu" or "memory" + metricValue = getMetricNumericValue(metricNameToCollect, node["status"][metricCategory][metricNameToCollect]) + + metricItem["DataItems"] = [] + metricProps = {} + metricProps["Timestamp"] = metricTime + metricProps["Host"] = node["metadata"]["name"] + # Adding this so that it is not set by base omsagent since it was not set earlier and being set by base omsagent + metricProps["Computer"] = node["metadata"]["name"] + metricProps["ObjectName"] = "K8SNode" + metricProps["InstanceName"] = clusterId + "/" + node["metadata"]["name"] + metricProps["Collections"] = [] + metricCollections = {} + metricCollections["CounterName"] = metricNametoReturn + metricCollections["Value"] = metricValue + + metricProps["Collections"].push(metricCollections) + metricItem["DataItems"].push(metricProps) + + #push node level metrics to a inmem hash so that we can use it looking up at container level. 
+ #Currently if container level cpu & memory limits are not defined we default to node level limits + @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue + #@Log.info ("Node metric hash: #{@@NodeMetrics}") + end + rescue => error + @Log.warn("parseNodeLimitsFromNodeItem failed: #{error} for metric #{metricCategory} #{metricNameToCollect}") + end + return metricItem + end #parseNodeLimitsFromNodeItem - metricItems.push(metricItem) - #push node level metrics (except gpu ones) to a inmem hash so that we can use it looking up at container level. - #Currently if container level cpu & memory limits are not defined we default to node level limits - if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") - @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue - #@Log.info ("Node metric hash: #{@@NodeMetrics}") - end + def parseNodeLimitsAsInsightsMetrics(node, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) + metricItem = {} + begin + #Since we are getting all node data at the same time and kubernetes doesnt specify a timestamp for the capacity and allocation metrics, + #if we are coming up with the time it should be same for all nodes + #metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z + if (!node["status"][metricCategory].nil?) && (!node["status"][metricCategory][metricNameToCollect].nil?) 
+ clusterId = getClusterId + clusterName = getClusterName + + # metricCategory can be "capacity" or "allocatable" and metricNameToCollect can be "cpu" or "memory" or "amd.com/gpu" or "nvidia.com/gpu" + metricValue = getMetricNumericValue(metricNameToCollect, node["status"][metricCategory][metricNameToCollect]) + + metricItem["CollectionTime"] = metricTime + metricItem["Computer"] = node["metadata"]["name"] + metricItem["Name"] = metricNametoReturn + metricItem["Value"] = metricValue + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE + + metricTags = {} + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName + metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_VENDOR] = metricNameToCollect + + metricItem["Tags"] = metricTags + + #push node level metrics (except gpu ones) to a inmem hash so that we can use it looking up at container level. + #Currently if container level cpu & memory limits are not defined we default to node level limits + if (metricNameToCollect.downcase != "nvidia.com/gpu") && (metricNameToCollect.downcase != "amd.com/gpu") + @@NodeMetrics[clusterId + "/" + node["metadata"]["name"] + "_" + metricCategory + "_" + metricNameToCollect] = metricValue + #@Log.info ("Node metric hash: #{@@NodeMetrics}") end end rescue => error @Log.warn("parseNodeLimitsAsInsightsMetrics failed: #{error} for metric #{metricCategory} #{metricNameToCollect}") end - return metricItems + return metricItem end def getMetricNumericValue(metricName, metricVal) @@ -777,5 +787,32 @@ def getKubeAPIServerUrl end return apiServerUrl end + + def getKubeServicesInventoryRecords(serviceList, batchTime = Time.utc.iso8601) + kubeServiceRecords = [] + begin + if (!serviceList.nil? && !serviceList.empty?) 
+ servicesCount = serviceList["items"].length + @Log.info("KubernetesApiClient::getKubeServicesInventoryRecords : number of services in serviceList #{servicesCount} @ #{Time.now.utc.iso8601}") + serviceList["items"].each do |item| + kubeServiceRecord = {} + kubeServiceRecord["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated + kubeServiceRecord["ServiceName"] = item["metadata"]["name"] + kubeServiceRecord["Namespace"] = item["metadata"]["namespace"] + kubeServiceRecord["SelectorLabels"] = [item["spec"]["selector"]] + # added these before emit to avoid memory foot print + # kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId + # kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName + kubeServiceRecord["ClusterIP"] = item["spec"]["clusterIP"] + kubeServiceRecord["ServiceType"] = item["spec"]["type"] + kubeServiceRecords.push(kubeServiceRecord.dup) + end + end + rescue => errorStr + @Log.warn "KubernetesApiClient::getKubeServicesInventoryRecords:Failed with an error : #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + return kubeServiceRecords + end end end diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index 0e5099c5e..cf41900dc 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -1,61 +1,61 @@ # frozen_string_literal: true class Constants - INSIGHTSMETRICS_TAGS_ORIGIN = "container.azm.ms" - INSIGHTSMETRICS_TAGS_CLUSTERID = "container.azm.ms/clusterId" - INSIGHTSMETRICS_TAGS_CLUSTERNAME = "container.azm.ms/clusterName" - INSIGHTSMETRICS_TAGS_GPU_VENDOR = "gpuVendor" - INSIGHTSMETRICS_TAGS_GPU_NAMESPACE = "container.azm.ms/gpu" - INSIGHTSMETRICS_TAGS_GPU_MODEL = "gpuModel" - INSIGHTSMETRICS_TAGS_GPU_ID = "gpuId" - INSIGHTSMETRICS_TAGS_CONTAINER_NAME = "containerName" - INSIGHTSMETRICS_TAGS_CONTAINER_ID = "containerName" - INSIGHTSMETRICS_TAGS_K8SNAMESPACE = "k8sNamespace" - 
INSIGHTSMETRICS_TAGS_CONTROLLER_NAME = "controllerName" - INSIGHTSMETRICS_TAGS_CONTROLLER_KIND = "controllerKind" - INSIGHTSMETRICS_TAGS_POD_UID = "podUid" - INSIGTHTSMETRICS_TAGS_PV_NAMESPACE = "container.azm.ms/pv" - INSIGHTSMETRICS_TAGS_PVC_NAME = "pvcName" - INSIGHTSMETRICS_TAGS_PVC_NAMESPACE = "pvcNamespace" - INSIGHTSMETRICS_TAGS_POD_NAME = "podName" - INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES = "pvCapacityBytes" - INSIGHTSMETRICS_TAGS_VOLUME_NAME = "volumeName" - INSIGHTSMETRICS_FLUENT_TAG = "oms.api.InsightsMetrics" - REASON_OOM_KILLED = "oomkilled" - #Kubestate (common) - INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE = "container.azm.ms/kubestate" - INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME = "creationTime" - #Kubestate (deployments) - INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_DEPLOYMENT_STATE = "kube_deployment_status_replicas_ready" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_NAME = "deployment" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_CREATIONTIME = "creationTime" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STRATEGY = "deploymentStrategy" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_SPEC_REPLICAS = "spec_replicas" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_UPDATED = "status_replicas_updated" - INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_AVAILABLE = "status_replicas_available" - #Kubestate (HPA) - INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_HPA_STATE = "kube_hpa_status_current_replicas" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_NAME = "hpa" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MAX_REPLICAS = "spec_max_replicas" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MIN_REPLICAS = "spec_min_replicas" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_KIND = "targetKind" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_NAME = "targetName" - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_DESIRED_REPLICAS = "status_desired_replicas" - - INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_LAST_SCALE_TIME = "lastScaleTime" - # MDM Metric names - 
MDM_OOM_KILLED_CONTAINER_COUNT = "oomKilledContainerCount" - MDM_CONTAINER_RESTART_COUNT = "restartingContainerCount" - MDM_POD_READY_PERCENTAGE = "podReadyPercentage" - MDM_STALE_COMPLETED_JOB_COUNT = "completedJobsCount" - MDM_DISK_USED_PERCENTAGE = "diskUsedPercentage" - MDM_CONTAINER_CPU_UTILIZATION_METRIC = "cpuExceededPercentage" - MDM_CONTAINER_MEMORY_RSS_UTILIZATION_METRIC = "memoryRssExceededPercentage" - MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC = "memoryWorkingSetExceededPercentage" - MDM_PV_UTILIZATION_METRIC = "pvUsageExceededPercentage" - MDM_NODE_CPU_USAGE_PERCENTAGE = "cpuUsagePercentage" - MDM_NODE_MEMORY_RSS_PERCENTAGE = "memoryRssPercentage" - MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE = "memoryWorkingSetPercentage" + INSIGHTSMETRICS_TAGS_ORIGIN = "container.azm.ms" + INSIGHTSMETRICS_TAGS_CLUSTERID = "container.azm.ms/clusterId" + INSIGHTSMETRICS_TAGS_CLUSTERNAME = "container.azm.ms/clusterName" + INSIGHTSMETRICS_TAGS_GPU_VENDOR = "gpuVendor" + INSIGHTSMETRICS_TAGS_GPU_NAMESPACE = "container.azm.ms/gpu" + INSIGHTSMETRICS_TAGS_GPU_MODEL = "gpuModel" + INSIGHTSMETRICS_TAGS_GPU_ID = "gpuId" + INSIGHTSMETRICS_TAGS_CONTAINER_NAME = "containerName" + INSIGHTSMETRICS_TAGS_CONTAINER_ID = "containerName" + INSIGHTSMETRICS_TAGS_K8SNAMESPACE = "k8sNamespace" + INSIGHTSMETRICS_TAGS_CONTROLLER_NAME = "controllerName" + INSIGHTSMETRICS_TAGS_CONTROLLER_KIND = "controllerKind" + INSIGHTSMETRICS_TAGS_POD_UID = "podUid" + INSIGTHTSMETRICS_TAGS_PV_NAMESPACE = "container.azm.ms/pv" + INSIGHTSMETRICS_TAGS_PVC_NAME = "pvcName" + INSIGHTSMETRICS_TAGS_PVC_NAMESPACE = "pvcNamespace" + INSIGHTSMETRICS_TAGS_POD_NAME = "podName" + INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES = "pvCapacityBytes" + INSIGHTSMETRICS_TAGS_VOLUME_NAME = "volumeName" + INSIGHTSMETRICS_FLUENT_TAG = "oms.api.InsightsMetrics" + REASON_OOM_KILLED = "oomkilled" + #Kubestate (common) + INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE = "container.azm.ms/kubestate" + INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME 
= "creationTime" + #Kubestate (deployments) + INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_DEPLOYMENT_STATE = "kube_deployment_status_replicas_ready" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_NAME = "deployment" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_CREATIONTIME = "creationTime" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STRATEGY = "deploymentStrategy" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_SPEC_REPLICAS = "spec_replicas" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_UPDATED = "status_replicas_updated" + INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_AVAILABLE = "status_replicas_available" + #Kubestate (HPA) + INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_HPA_STATE = "kube_hpa_status_current_replicas" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_NAME = "hpa" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MAX_REPLICAS = "spec_max_replicas" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MIN_REPLICAS = "spec_min_replicas" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_KIND = "targetKind" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_NAME = "targetName" + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_DESIRED_REPLICAS = "status_desired_replicas" + + INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_LAST_SCALE_TIME = "lastScaleTime" + # MDM Metric names + MDM_OOM_KILLED_CONTAINER_COUNT = "oomKilledContainerCount" + MDM_CONTAINER_RESTART_COUNT = "restartingContainerCount" + MDM_POD_READY_PERCENTAGE = "podReadyPercentage" + MDM_STALE_COMPLETED_JOB_COUNT = "completedJobsCount" + MDM_DISK_USED_PERCENTAGE = "diskUsedPercentage" + MDM_CONTAINER_CPU_UTILIZATION_METRIC = "cpuExceededPercentage" + MDM_CONTAINER_MEMORY_RSS_UTILIZATION_METRIC = "memoryRssExceededPercentage" + MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC = "memoryWorkingSetExceededPercentage" + MDM_PV_UTILIZATION_METRIC = "pvUsageExceededPercentage" + MDM_NODE_CPU_USAGE_PERCENTAGE = "cpuUsagePercentage" + MDM_NODE_MEMORY_RSS_PERCENTAGE = "memoryRssPercentage" + 
MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE = "memoryWorkingSetPercentage" CONTAINER_TERMINATED_RECENTLY_IN_MINUTES = 5 OBJECT_NAME_K8S_CONTAINER = "K8SContainer" @@ -77,6 +77,9 @@ class Constants OMSAGENT_ZERO_FILL = "omsagent" KUBESYSTEM_NAMESPACE_ZERO_FILL = "kube-system" VOLUME_NAME_ZERO_FILL = "-" + PV_TYPES =["awsElasticBlockStore", "azureDisk", "azureFile", "cephfs", "cinder", "csi", "fc", "flexVolume", + "flocker", "gcePersistentDisk", "glusterfs", "hostPath", "iscsi", "local", "nfs", + "photonPersistentDisk", "portworxVolume", "quobyte", "rbd", "scaleIO", "storageos", "vsphereVolume"] #Telemetry constants CONTAINER_METRICS_HEART_BEAT_EVENT = "ContainerMetricsMdmHeartBeatEvent" @@ -84,10 +87,13 @@ class Constants CONTAINER_RESOURCE_UTIL_HEART_BEAT_EVENT = "ContainerResourceUtilMdmHeartBeatEvent" PV_USAGE_HEART_BEAT_EVENT = "PVUsageMdmHeartBeatEvent" PV_KUBE_SYSTEM_METRICS_ENABLED_EVENT = "CollectPVKubeSystemMetricsEnabled" + PV_INVENTORY_HEART_BEAT_EVENT = "KubePVInventoryHeartBeatEvent" TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 10 KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES = 15 ZERO_FILL_METRICS_INTERVAL_IN_MINUTES = 30 MDM_TIME_SERIES_FLUSHED_IN_LAST_HOUR = "MdmTimeSeriesFlushedInLastHour" + MDM_EXCEPTION_TELEMETRY_METRIC = "AKSCustomMetricsMdmExceptions" + MDM_EXCEPTIONS_METRIC_FLUSH_INTERVAL = 30 #Pod Statuses POD_STATUS_TERMINATING = "Terminating" diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index 3bc674ea8..8d7e729c8 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -15,7 +15,6 @@ class CAdvisor2MdmFilter < Filter config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => "/var/opt/microsoft/docker-cimprov/log/filter_cadvisor2mdm.log" - config_param :custom_metrics_azure_regions, :string config_param :metrics_to_collect, :string, :default => 
"Constants::CPU_USAGE_NANO_CORES,Constants::MEMORY_WORKING_SET_BYTES,Constants::MEMORY_RSS_BYTES,Constants::PV_USED_BYTES" @@hostName = (OMS::Common.get_hostname) @@ -42,7 +41,7 @@ def configure(conf) def start super begin - @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability(@custom_metrics_azure_regions) + @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability @metrics_to_collect_hash = build_metrics_hash @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" @@containerResourceUtilTelemetryTimeTracker = DateTime.now.to_time.to_i @@ -309,8 +308,16 @@ def ensure_cpu_memory_capacity_set end elsif controller_type.downcase == "daemonset" capacity_from_kubelet = KubeletUtils.get_node_capacity - @cpu_capacity = capacity_from_kubelet[0] - @memory_capacity = capacity_from_kubelet[1] + + # Error handling in case /metrics/cadvsior endpoint fails + if !capacity_from_kubelet.nil? && capacity_from_kubelet.length > 1 + @cpu_capacity = capacity_from_kubelet[0] + @memory_capacity = capacity_from_kubelet[1] + else + # cpu_capacity and memory_capacity keep initialized value of 0.0 + @log.error "Error getting capacity_from_kubelet: cpu_capacity and memory_capacity" + end + end end diff --git a/source/plugins/ruby/filter_inventory2mdm.rb b/source/plugins/ruby/filter_inventory2mdm.rb index b5ef587ff..38ccab885 100644 --- a/source/plugins/ruby/filter_inventory2mdm.rb +++ b/source/plugins/ruby/filter_inventory2mdm.rb @@ -13,7 +13,6 @@ class Inventory2MdmFilter < Filter config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => '/var/opt/microsoft/docker-cimprov/log/filter_inventory2mdm.log' - config_param :custom_metrics_azure_regions, :string @@node_count_metric_name = 'nodesCount' @@pod_count_metric_name = 'podCount' @@ -98,7 +97,7 @@ def configure(conf) def start super - @process_incoming_stream = 
CustomMetricsUtils.check_custom_metrics_availability(@custom_metrics_azure_regions) + @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" end diff --git a/source/plugins/ruby/filter_telegraf2mdm.rb b/source/plugins/ruby/filter_telegraf2mdm.rb index 98d258ea5..88ae428d1 100644 --- a/source/plugins/ruby/filter_telegraf2mdm.rb +++ b/source/plugins/ruby/filter_telegraf2mdm.rb @@ -15,7 +15,6 @@ class Telegraf2MdmFilter < Filter config_param :enable_log, :integer, :default => 0 config_param :log_path, :string, :default => "/var/opt/microsoft/docker-cimprov/log/filter_telegraf2mdm.log" - config_param :custom_metrics_azure_regions, :string @process_incoming_stream = true @@ -36,7 +35,7 @@ def configure(conf) def start super begin - @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability(@custom_metrics_azure_regions) + @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" rescue => errorStr @log.info "Error initializing plugin #{errorStr}" diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 6f59a3fc1..4f6017cc5 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -17,8 +17,9 @@ def initialize require_relative "omslog" require_relative "ApplicationInsightsUtility" - # 30000 events account to approximately 5MB - @EVENTS_CHUNK_SIZE = 30000 + # refer tomlparser-agent-config for defaults + # this configurable via configmap + @EVENTS_CHUNK_SIZE = 0 # Initializing events count for telemetry @eventsCount = 0 @@ -36,6 +37,15 @@ def configure(conf) def start if @run_interval + if !ENV["EVENTS_CHUNK_SIZE"].nil? && !ENV["EVENTS_CHUNK_SIZE"].empty? 
&& ENV["EVENTS_CHUNK_SIZE"].to_i > 0 + @EVENTS_CHUNK_SIZE = ENV["EVENTS_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_events::start: setting to default value since got EVENTS_CHUNK_SIZE nil or empty") + @EVENTS_CHUNK_SIZE = 4000 + end + $log.info("in_kube_events::start : EVENTS_CHUNK_SIZE @ #{@EVENTS_CHUNK_SIZE}") + @finished = false @condition = ConditionVariable.new @mutex = Mutex.new @@ -82,6 +92,8 @@ def enumerate end $log.info("in_kube_events::enumerate : Done getting events from Kube API @ #{Time.now.utc.iso8601}") if (!eventList.nil? && !eventList.empty? && eventList.key?("items") && !eventList["items"].nil? && !eventList["items"].empty?) + eventsCount = eventList["items"].length + $log.info "in_kube_events::enumerate:Received number of events in eventList is #{eventsCount} @ #{Time.now.utc.iso8601}" newEventQueryState = parse_and_emit_records(eventList, eventQueryState, newEventQueryState, batchTime) else $log.warn "in_kube_events::enumerate:Received empty eventList" @@ -91,6 +103,8 @@ def enumerate while (!continuationToken.nil? && !continuationToken.empty?) continuationToken, eventList = KubernetesApiClient.getResourcesAndContinuationToken("events?fieldSelector=type!=Normal&limit=#{@EVENTS_CHUNK_SIZE}&continue=#{continuationToken}") if (!eventList.nil? && !eventList.empty? && eventList.key?("items") && !eventList["items"].nil? && !eventList["items"].empty?) 
+ eventsCount = eventList["items"].length + $log.info "in_kube_events::enumerate:Received number of events in eventList is #{eventsCount} @ #{Time.now.utc.iso8601}" newEventQueryState = parse_and_emit_records(eventList, eventQueryState, newEventQueryState, batchTime) else $log.warn "in_kube_events::enumerate:Received empty eventList" diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 4d58382f5..e7c5060a5 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -32,7 +32,12 @@ def initialize require_relative "ApplicationInsightsUtility" require_relative "oms_common" require_relative "omslog" - @NODES_CHUNK_SIZE = "400" + # refer tomlparser-agent-config for the defaults + @NODES_CHUNK_SIZE = 0 + @NODES_EMIT_STREAM_BATCH_SIZE = 0 + + @nodeInventoryE2EProcessingLatencyMs = 0 + @nodesAPIE2ELatencyMs = 0 require_relative "constants" end @@ -45,11 +50,30 @@ def configure(conf) def start if @run_interval + if !ENV["NODES_CHUNK_SIZE"].nil? && !ENV["NODES_CHUNK_SIZE"].empty? && ENV["NODES_CHUNK_SIZE"].to_i > 0 + @NODES_CHUNK_SIZE = ENV["NODES_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_nodes::start: setting to default value since got NODES_CHUNK_SIZE nil or empty") + @NODES_CHUNK_SIZE = 250 + end + $log.info("in_kube_nodes::start : NODES_CHUNK_SIZE @ #{@NODES_CHUNK_SIZE}") + + if !ENV["NODES_EMIT_STREAM_BATCH_SIZE"].nil? && !ENV["NODES_EMIT_STREAM_BATCH_SIZE"].empty? 
&& ENV["NODES_EMIT_STREAM_BATCH_SIZE"].to_i > 0 + @NODES_EMIT_STREAM_BATCH_SIZE = ENV["NODES_EMIT_STREAM_BATCH_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_nodes::start: setting to default value since got NODES_EMIT_STREAM_BATCH_SIZE nil or empty") + @NODES_EMIT_STREAM_BATCH_SIZE = 100 + end + $log.info("in_kube_nodes::start : NODES_EMIT_STREAM_BATCH_SIZE @ #{@NODES_EMIT_STREAM_BATCH_SIZE}") + @finished = false @condition = ConditionVariable.new @mutex = Mutex.new @thread = Thread.new(&method(:run_periodic)) @@nodeTelemetryTimeTracker = DateTime.now.to_time.to_i + @@nodeInventoryLatencyTelemetryTimeTracker = DateTime.now.to_time.to_i end end @@ -69,14 +93,20 @@ def enumerate currentTime = Time.now batchTime = currentTime.utc.iso8601 + @nodesAPIE2ELatencyMs = 0 + @nodeInventoryE2EProcessingLatencyMs = 0 + nodeInventoryStartTime = (Time.now.to_f * 1000).to_i + nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i # Initializing continuation token to nil continuationToken = nil $log.info("in_kube_nodes::enumerate : Getting nodes from Kube API @ #{Time.now.utc.iso8601}") resourceUri = KubernetesApiClient.getNodesResourceUri("nodes?limit=#{@NODES_CHUNK_SIZE}") continuationToken, nodeInventory = KubernetesApiClient.getResourcesAndContinuationToken(resourceUri) - $log.info("in_kube_nodes::enumerate : Done getting nodes from Kube API @ #{Time.now.utc.iso8601}") + nodesAPIChunkEndTime = (Time.now.to_f * 1000).to_i + @nodesAPIE2ELatencyMs = (nodesAPIChunkEndTime - nodesAPIChunkStartTime) if (!nodeInventory.nil? && !nodeInventory.empty? && nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) 
+ $log.info("in_kube_nodes::enumerate : number of node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") parse_and_emit_records(nodeInventory, batchTime) else $log.warn "in_kube_nodes::enumerate:Received empty nodeInventory" @@ -84,14 +114,26 @@ def enumerate #If we receive a continuation token, make calls, process and flush data until we have processed all data while (!continuationToken.nil? && !continuationToken.empty?) + nodesAPIChunkStartTime = (Time.now.to_f * 1000).to_i continuationToken, nodeInventory = KubernetesApiClient.getResourcesAndContinuationToken(resourceUri + "&continue=#{continuationToken}") + nodesAPIChunkEndTime = (Time.now.to_f * 1000).to_i + @nodesAPIE2ELatencyMs = @nodesAPIE2ELatencyMs + (nodesAPIChunkEndTime - nodesAPIChunkStartTime) if (!nodeInventory.nil? && !nodeInventory.empty? && nodeInventory.key?("items") && !nodeInventory["items"].nil? && !nodeInventory["items"].empty?) + $log.info("in_kube_nodes::enumerate : number of node items :#{nodeInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") parse_and_emit_records(nodeInventory, batchTime) else $log.warn "in_kube_nodes::enumerate:Received empty nodeInventory" end end + @nodeInventoryE2EProcessingLatencyMs = ((Time.now.to_f * 1000).to_i - nodeInventoryStartTime) + timeDifference = (DateTime.now.to_time.to_i - @@nodeInventoryLatencyTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + ApplicationInsightsUtility.sendMetricTelemetry("NodeInventoryE2EProcessingLatencyMs", @nodeInventoryE2EProcessingLatencyMs, {}) + ApplicationInsightsUtility.sendMetricTelemetry("NodesAPIE2ELatencyMs", @nodesAPIE2ELatencyMs, {}) + @@nodeInventoryLatencyTelemetryTimeTracker = DateTime.now.to_time.to_i + end # Setting this to nil so that we dont hold memory until GC kicks in nodeInventory = nil rescue => errorStr @@ -109,77 +151,32 @@ def 
parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) eventStream = MultiEventStream.new containerNodeInventoryEventStream = MultiEventStream.new insightsMetricsEventStream = MultiEventStream.new + kubePerfEventStream = MultiEventStream.new @@istestvar = ENV["ISTEST"] #get node inventory - nodeInventory["items"].each do |items| - record = {} - # Sending records for ContainerNodeInventory - containerNodeInventoryRecord = {} - containerNodeInventoryRecord["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated - containerNodeInventoryRecord["Computer"] = items["metadata"]["name"] - - record["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated - record["Computer"] = items["metadata"]["name"] - record["ClusterName"] = KubernetesApiClient.getClusterName - record["ClusterId"] = KubernetesApiClient.getClusterId - record["CreationTimeStamp"] = items["metadata"]["creationTimestamp"] - record["Labels"] = [items["metadata"]["labels"]] - record["Status"] = "" - - if !items["spec"]["providerID"].nil? && !items["spec"]["providerID"].empty? - if File.file?(@@AzStackCloudFileName) # existence of this file indicates agent running on azstack - record["KubernetesProviderID"] = "azurestack" - else - #Multicluster kusto query is filtering after splitting by ":" to the left, so do the same here - #https://msazure.visualstudio.com/One/_git/AzureUX-Monitoring?path=%2Fsrc%2FMonitoringExtension%2FClient%2FInfraInsights%2FData%2FQueryTemplates%2FMultiClusterKustoQueryTemplate.ts&_a=contents&version=GBdev - provider = items["spec"]["providerID"].split(":")[0] - if !provider.nil? && !provider.empty? - record["KubernetesProviderID"] = provider - else - record["KubernetesProviderID"] = items["spec"]["providerID"] - end - end - else - record["KubernetesProviderID"] = "onprem" - end - - # Refer to https://kubernetes.io/docs/concepts/architecture/nodes/#condition for possible node conditions. 
- # We check the status of each condition e.g. {"type": "OutOfDisk","status": "False"} . Based on this we - # populate the KubeNodeInventory Status field. A possible value for this field could be "Ready OutofDisk" - # implying that the node is ready for hosting pods, however its out of disk. - - if items["status"].key?("conditions") && !items["status"]["conditions"].empty? - allNodeConditions = "" - items["status"]["conditions"].each do |condition| - if condition["status"] == "True" - if !allNodeConditions.empty? - allNodeConditions = allNodeConditions + "," + condition["type"] - else - allNodeConditions = condition["type"] - end - end - #collect last transition to/from ready (no matter ready is true/false) - if condition["type"] == "Ready" && !condition["lastTransitionTime"].nil? - record["LastTransitionTimeReady"] = condition["lastTransitionTime"] - end - end - if !allNodeConditions.empty? - record["Status"] = allNodeConditions + nodeInventory["items"].each do |item| + # node inventory + nodeInventoryRecord = getNodeInventoryRecord(item, batchTime) + wrapper = { + "DataType" => "KUBE_NODE_INVENTORY_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [nodeInventoryRecord.each { |k, v| nodeInventoryRecord[k] = v }], + } + eventStream.add(emitTime, wrapper) if wrapper + if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@tag, eventStream) if eventStream + $log.info("in_kube_node::parse_and_emit_records: number of mdm node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@MDMKubeNodeInventoryTag, eventStream) if eventStream + + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end + eventStream = MultiEventStream.new end - nodeInfo = items["status"]["nodeInfo"] - record["KubeletVersion"] = nodeInfo["kubeletVersion"] - record["KubeProxyVersion"] = nodeInfo["kubeProxyVersion"] - containerNodeInventoryRecord["OperatingSystem"] = nodeInfo["osImage"] - containerRuntimeVersion = nodeInfo["containerRuntimeVersion"] - if containerRuntimeVersion.downcase.start_with?("docker://") - containerNodeInventoryRecord["DockerVersion"] = containerRuntimeVersion.split("//")[1] - else - # using containerRuntimeVersion as DockerVersion as is for non docker runtimes - containerNodeInventoryRecord["DockerVersion"] = containerRuntimeVersion - end - # ContainerNodeInventory data for docker version and operating system. + # container node inventory + containerNodeInventoryRecord = getContainerNodeInventoryRecord(item, batchTime) containerNodeInventoryWrapper = { "DataType" => "CONTAINER_NODE_INVENTORY_BLOB", "IPName" => "ContainerInsights", @@ -187,33 +184,81 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) } containerNodeInventoryEventStream.add(emitTime, containerNodeInventoryWrapper) if containerNodeInventoryWrapper - wrapper = { - "DataType" => "KUBE_NODE_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [record.each { |k, v| record[k] = v }], - } - eventStream.add(emitTime, wrapper) if wrapper + if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && containerNodeInventoryEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream + containerNodeInventoryEventStream = MultiEventStream.new + end + + # node metrics records + nodeMetricRecords = [] + 
nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "allocatable", "cpu", "cpuAllocatableNanoCores", batchTime) + if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? + nodeMetricRecords.push(nodeMetricRecord) + end + nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "allocatable", "memory", "memoryAllocatableBytes", batchTime) + if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? + nodeMetricRecords.push(nodeMetricRecord) + end + nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "capacity", "cpu", "cpuCapacityNanoCores", batchTime) + if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? + nodeMetricRecords.push(nodeMetricRecord) + end + nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "capacity", "memory", "memoryCapacityBytes", batchTime) + if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? + nodeMetricRecords.push(nodeMetricRecord) + end + nodeMetricRecords.each do |metricRecord| + metricRecord["DataType"] = "LINUX_PERF_BLOB" + metricRecord["IPName"] = "LogManagement" + kubePerfEventStream.add(emitTime, metricRecord) if metricRecord + end + if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + kubePerfEventStream = MultiEventStream.new + end + + # node GPU metrics record + nodeGPUInsightsMetricsRecords = [] + insightsMetricsRecord = KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(item, "allocatable", "nvidia.com/gpu", "nodeGpuAllocatable", batchTime) + if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? 
+ nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) + end + insightsMetricsRecord = KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(item, "capacity", "nvidia.com/gpu", "nodeGpuCapacity", batchTime) + if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? + nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) + end + insightsMetricsRecord = KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(item, "allocatable", "amd.com/gpu", "nodeGpuAllocatable", batchTime) + if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? + nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) + end + insightsMetricsRecord = KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(item, "capacity", "amd.com/gpu", "nodeGpuCapacity", batchTime) + if !insightsMetricsRecord.nil? && !insightsMetricsRecord.empty? + nodeGPUInsightsMetricsRecords.push(insightsMetricsRecord) + end + nodeGPUInsightsMetricsRecords.each do |insightsMetricsRecord| + wrapper = { + "DataType" => "INSIGHTS_METRICS_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], + } + insightsMetricsEventStream.add(emitTime, wrapper) if wrapper + end + if @NODES_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @NODES_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + insightsMetricsEventStream = MultiEventStream.new + end # Adding telemetry to send node telemetry every 10 minutes timeDifference = (DateTime.now.to_time.to_i - @@nodeTelemetryTimeTracker).abs timeDifferenceInMinutes = timeDifference / 60 if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) - properties = {} - properties["Computer"] = record["Computer"] - 
properties["KubeletVersion"] = record["KubeletVersion"] - properties["OperatingSystem"] = nodeInfo["operatingSystem"] - # DockerVersion field holds docker version if runtime is docker/moby else :// - if containerRuntimeVersion.downcase.start_with?("docker://") - properties["DockerVersion"] = containerRuntimeVersion.split("//")[1] - else - properties["DockerVersion"] = containerRuntimeVersion - end - properties["KubernetesProviderID"] = record["KubernetesProviderID"] - properties["KernelVersion"] = nodeInfo["kernelVersion"] - properties["OSImage"] = nodeInfo["osImage"] + properties = getNodeTelemetryProps(item) + properties["KubernetesProviderID"] = nodeInventoryRecord["KubernetesProviderID"] + capacityInfo = item["status"]["capacity"] - capacityInfo = items["status"]["capacity"] ApplicationInsightsUtility.sendMetricTelemetry("NodeMemory", capacityInfo["memory"], properties) - begin if (!capacityInfo["nvidia.com/gpu"].nil?) && (!capacityInfo["nvidia.com/gpu"].empty?) properties["nvigpus"] = capacityInfo["nvidia.com/gpu"] @@ -247,72 +292,32 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) telemetrySent = true end end - router.emit_stream(@tag, eventStream) if eventStream - router.emit_stream(@@MDMKubeNodeInventoryTag, eventStream) if eventStream - router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream if telemetrySent == true @@nodeTelemetryTimeTracker = DateTime.now.to_time.to_i end - - if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && eventStream.count > 0) - $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + if eventStream.count > 0 + $log.info("in_kube_node::parse_and_emit_records: number of node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@tag, eventStream) if eventStream + $log.info("in_kube_node::parse_and_emit_records: number of mdm node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@MDMKubeNodeInventoryTag, eventStream) if eventStream + eventStream = nil end - #:optimize:kubeperf merge - begin - #if(!nodeInventory.empty?) - nodeMetricDataItems = [] - #allocatable metrics @ node level - nodeMetricDataItems.concat(KubernetesApiClient.parseNodeLimits(nodeInventory, "allocatable", "cpu", "cpuAllocatableNanoCores", batchTime)) - nodeMetricDataItems.concat(KubernetesApiClient.parseNodeLimits(nodeInventory, "allocatable", "memory", "memoryAllocatableBytes", batchTime)) - #capacity metrics @ node level - nodeMetricDataItems.concat(KubernetesApiClient.parseNodeLimits(nodeInventory, "capacity", "cpu", "cpuCapacityNanoCores", batchTime)) - nodeMetricDataItems.concat(KubernetesApiClient.parseNodeLimits(nodeInventory, "capacity", "memory", "memoryCapacityBytes", batchTime)) - - kubePerfEventStream = MultiEventStream.new - - nodeMetricDataItems.each do |record| - record["DataType"] = "LINUX_PERF_BLOB" - record["IPName"] = "LogManagement" - kubePerfEventStream.add(emitTime, record) if record - end - #end - router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream - - #start GPU InsightsMetrics items - begin - nodeGPUInsightsMetricsDataItems = [] - nodeGPUInsightsMetricsDataItems.concat(KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(nodeInventory, "allocatable", "nvidia.com/gpu", "nodeGpuAllocatable", batchTime)) - 
nodeGPUInsightsMetricsDataItems.concat(KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(nodeInventory, "capacity", "nvidia.com/gpu", "nodeGpuCapacity", batchTime)) - - nodeGPUInsightsMetricsDataItems.concat(KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(nodeInventory, "allocatable", "amd.com/gpu", "nodeGpuAllocatable", batchTime)) - nodeGPUInsightsMetricsDataItems.concat(KubernetesApiClient.parseNodeLimitsAsInsightsMetrics(nodeInventory, "capacity", "amd.com/gpu", "nodeGpuCapacity", batchTime)) - - nodeGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(emitTime, wrapper) if wrapper - end - - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) - $log.info("kubeNodeInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - rescue => errorStr - $log.warn "Failed when processing GPU metrics in_kube_nodes : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end - #end GPU InsightsMetrics items - rescue => errorStr - $log.warn "Failed in enumerate for KubePerf from in_kube_nodes : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + if containerNodeInventoryEventStream.count > 0 + $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{containerNodeInventoryEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream + containerNodeInventoryEventStream = nil end - 
#:optimize:end kubeperf merge + if kubePerfEventStream.count > 0 + $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + kubePerfEventStream = nil + end + if insightsMetricsEventStream.count > 0 + $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + insightsMetricsEventStream = nil + end rescue => errorStr $log.warn "Failed to retrieve node inventory: #{errorStr}" $log.debug_backtrace(errorStr.backtrace) @@ -352,5 +357,112 @@ def run_periodic end @mutex.unlock end + + # TODO - move this method to KubernetesClient or helper class + def getNodeInventoryRecord(item, batchTime = Time.utc.iso8601) + record = {} + begin + record["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated + record["Computer"] = item["metadata"]["name"] + record["ClusterName"] = KubernetesApiClient.getClusterName + record["ClusterId"] = KubernetesApiClient.getClusterId + record["CreationTimeStamp"] = item["metadata"]["creationTimestamp"] + record["Labels"] = [item["metadata"]["labels"]] + record["Status"] = "" + + if !item["spec"]["providerID"].nil? && !item["spec"]["providerID"].empty? 
+ if File.file?(@@AzStackCloudFileName) # existence of this file indicates agent running on azstack + record["KubernetesProviderID"] = "azurestack" + else + #Multicluster kusto query is filtering after splitting by ":" to the left, so do the same here + #https://msazure.visualstudio.com/One/_git/AzureUX-Monitoring?path=%2Fsrc%2FMonitoringExtension%2FClient%2FInfraInsights%2FData%2FQueryTemplates%2FMultiClusterKustoQueryTemplate.ts&_a=contents&version=GBdev + provider = item["spec"]["providerID"].split(":")[0] + if !provider.nil? && !provider.empty? + record["KubernetesProviderID"] = provider + else + record["KubernetesProviderID"] = item["spec"]["providerID"] + end + end + else + record["KubernetesProviderID"] = "onprem" + end + + # Refer to https://kubernetes.io/docs/concepts/architecture/nodes/#condition for possible node conditions. + # We check the status of each condition e.g. {"type": "OutOfDisk","status": "False"} . Based on this we + # populate the KubeNodeInventory Status field. A possible value for this field could be "Ready OutofDisk" + # implying that the node is ready for hosting pods, however its out of disk. + if item["status"].key?("conditions") && !item["status"]["conditions"].empty? + allNodeConditions = "" + item["status"]["conditions"].each do |condition| + if condition["status"] == "True" + if !allNodeConditions.empty? + allNodeConditions = allNodeConditions + "," + condition["type"] + else + allNodeConditions = condition["type"] + end + end + #collect last transition to/from ready (no matter ready is true/false) + if condition["type"] == "Ready" && !condition["lastTransitionTime"].nil? + record["LastTransitionTimeReady"] = condition["lastTransitionTime"] + end + end + if !allNodeConditions.empty? 
+ record["Status"] = allNodeConditions + end + end + nodeInfo = item["status"]["nodeInfo"] + record["KubeletVersion"] = nodeInfo["kubeletVersion"] + record["KubeProxyVersion"] = nodeInfo["kubeProxyVersion"] + rescue => errorStr + $log.warn "in_kube_nodes::getNodeInventoryRecord:Failed: #{errorStr}" + end + return record + end + + # TODO - move this method to KubernetesClient or helper class + def getContainerNodeInventoryRecord(item, batchTime = Time.utc.iso8601) + containerNodeInventoryRecord = {} + begin + containerNodeInventoryRecord["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated + containerNodeInventoryRecord["Computer"] = item["metadata"]["name"] + nodeInfo = item["status"]["nodeInfo"] + containerNodeInventoryRecord["OperatingSystem"] = nodeInfo["osImage"] + containerRuntimeVersion = nodeInfo["containerRuntimeVersion"] + if containerRuntimeVersion.downcase.start_with?("docker://") + containerNodeInventoryRecord["DockerVersion"] = containerRuntimeVersion.split("//")[1] + else + # using containerRuntimeVersion as DockerVersion as is for non docker runtimes + containerNodeInventoryRecord["DockerVersion"] = containerRuntimeVersion + end + rescue => errorStr + $log.warn "in_kube_nodes::getContainerNodeInventoryRecord:Failed: #{errorStr}" + end + return containerNodeInventoryRecord + end + + # TODO - move this method to KubernetesClient or helper class + def getNodeTelemetryProps(item) + properties = {} + begin + properties["Computer"] = item["metadata"]["name"] + nodeInfo = item["status"]["nodeInfo"] + properties["KubeletVersion"] = nodeInfo["kubeletVersion"] + properties["OperatingSystem"] = nodeInfo["osImage"] + properties["KernelVersion"] = nodeInfo["kernelVersion"] + properties["OSImage"] = nodeInfo["osImage"] + containerRuntimeVersion = nodeInfo["containerRuntimeVersion"] + if containerRuntimeVersion.downcase.start_with?("docker://") + properties["DockerVersion"] = containerRuntimeVersion.split("//")[1] + else + # using 
containerRuntimeVersion as DockerVersion as is for non docker runtimes + properties["DockerVersion"] = containerRuntimeVersion + end + properties["NODES_CHUNK_SIZE"] = @NODES_CHUNK_SIZE + properties["NODES_EMIT_STREAM_BATCH_SIZE"] = @NODES_EMIT_STREAM_BATCH_SIZE + rescue => errorStr + $log.warn "in_kube_nodes::getContainerNodeIngetNodeTelemetryPropsventoryRecord:Failed: #{errorStr}" + end + return properties + end end # Kube_Node_Input end # module diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 4880d80e7..0cff2eefe 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -2,7 +2,7 @@ # frozen_string_literal: true module Fluent - require_relative "podinventory_to_mdm" + require_relative "podinventory_to_mdm" class Kube_PodInventory_Input < Input Plugin.register_input("kubepodinventory", self) @@ -19,7 +19,7 @@ def initialize require "yajl" require "set" require "time" - + require_relative "kubernetes_container_inventory" require_relative "KubernetesApiClient" require_relative "ApplicationInsightsUtility" @@ -27,24 +27,48 @@ def initialize require_relative "omslog" require_relative "constants" - @PODS_CHUNK_SIZE = "1500" + # refer tomlparser-agent-config for updating defaults + # this configurable via configmap + @PODS_CHUNK_SIZE = 0 + @PODS_EMIT_STREAM_BATCH_SIZE = 0 + @podCount = 0 + @serviceCount = 0 @controllerSet = Set.new [] @winContainerCount = 0 @controllerData = {} + @podInventoryE2EProcessingLatencyMs = 0 + @podsAPIE2ELatencyMs = 0 end config_param :run_interval, :time, :default => 60 config_param :tag, :string, :default => "oms.containerinsights.KubePodInventory" - config_param :custom_metrics_azure_regions, :string def configure(conf) super - @inventoryToMdmConvertor = Inventory2MdmConvertor.new(@custom_metrics_azure_regions) + @inventoryToMdmConvertor = Inventory2MdmConvertor.new() end def start if @run_interval + if 
!ENV["PODS_CHUNK_SIZE"].nil? && !ENV["PODS_CHUNK_SIZE"].empty? && ENV["PODS_CHUNK_SIZE"].to_i > 0 + @PODS_CHUNK_SIZE = ENV["PODS_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_podinventory::start: setting to default value since got PODS_CHUNK_SIZE nil or empty") + @PODS_CHUNK_SIZE = 1000 + end + $log.info("in_kube_podinventory::start : PODS_CHUNK_SIZE @ #{@PODS_CHUNK_SIZE}") + + if !ENV["PODS_EMIT_STREAM_BATCH_SIZE"].nil? && !ENV["PODS_EMIT_STREAM_BATCH_SIZE"].empty? && ENV["PODS_EMIT_STREAM_BATCH_SIZE"].to_i > 0 + @PODS_EMIT_STREAM_BATCH_SIZE = ENV["PODS_EMIT_STREAM_BATCH_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kube_podinventory::start: setting to default value since got PODS_EMIT_STREAM_BATCH_SIZE nil or empty") + @PODS_EMIT_STREAM_BATCH_SIZE = 200 + end + $log.info("in_kube_podinventory::start : PODS_EMIT_STREAM_BATCH_SIZE @ #{@PODS_EMIT_STREAM_BATCH_SIZE}") + @finished = false @condition = ConditionVariable.new @mutex = Mutex.new @@ -68,12 +92,15 @@ def enumerate(podList = nil) podInventory = podList telemetryFlush = false @podCount = 0 + @serviceCount = 0 @controllerSet = Set.new [] @winContainerCount = 0 @controllerData = {} currentTime = Time.now batchTime = currentTime.utc.iso8601 - + serviceRecords = [] + @podInventoryE2EProcessingLatencyMs = 0 + podInventoryStartTime = (Time.now.to_f * 1000).to_i # Get services first so that we dont need to make a call for very chunk $log.info("in_kube_podinventory::enumerate : Getting services from Kube API @ #{Time.now.utc.iso8601}") serviceInfo = KubernetesApiClient.getKubeResourceInfo("services") @@ -85,32 +112,48 @@ def enumerate(podList = nil) serviceList = Yajl::Parser.parse(StringIO.new(serviceInfo.body)) $log.info("in_kube_podinventory::enumerate:End:Parsing services data using yajl @ #{Time.now.utc.iso8601}") serviceInfo = nil + # service inventory records much smaller and fixed size 
compared to serviceList + serviceRecords = KubernetesApiClient.getKubeServicesInventoryRecords(serviceList, batchTime) + # updating for telemetry + @serviceCount += serviceRecords.length + serviceList = nil end + # to track e2e processing latency + @podsAPIE2ELatencyMs = 0 + podsAPIChunkStartTime = (Time.now.to_f * 1000).to_i # Initializing continuation token to nil continuationToken = nil $log.info("in_kube_podinventory::enumerate : Getting pods from Kube API @ #{Time.now.utc.iso8601}") continuationToken, podInventory = KubernetesApiClient.getResourcesAndContinuationToken("pods?limit=#{@PODS_CHUNK_SIZE}") $log.info("in_kube_podinventory::enumerate : Done getting pods from Kube API @ #{Time.now.utc.iso8601}") + podsAPIChunkEndTime = (Time.now.to_f * 1000).to_i + @podsAPIE2ELatencyMs = (podsAPIChunkEndTime - podsAPIChunkStartTime) if (!podInventory.nil? && !podInventory.empty? && podInventory.key?("items") && !podInventory["items"].nil? && !podInventory["items"].empty?) - parse_and_emit_records(podInventory, serviceList, continuationToken, batchTime) + $log.info("in_kube_podinventory::enumerate : number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + parse_and_emit_records(podInventory, serviceRecords, continuationToken, batchTime) else $log.warn "in_kube_podinventory::enumerate:Received empty podInventory" end #If we receive a continuation token, make calls, process and flush data until we have processed all data while (!continuationToken.nil? && !continuationToken.empty?) + podsAPIChunkStartTime = (Time.now.to_f * 1000).to_i continuationToken, podInventory = KubernetesApiClient.getResourcesAndContinuationToken("pods?limit=#{@PODS_CHUNK_SIZE}&continue=#{continuationToken}") + podsAPIChunkEndTime = (Time.now.to_f * 1000).to_i + @podsAPIE2ELatencyMs = @podsAPIE2ELatencyMs + (podsAPIChunkEndTime - podsAPIChunkStartTime) if (!podInventory.nil? && !podInventory.empty? && podInventory.key?("items") && !podInventory["items"].nil? 
&& !podInventory["items"].empty?) - parse_and_emit_records(podInventory, serviceList, continuationToken, batchTime) + $log.info("in_kube_podinventory::enumerate : number of pod items :#{podInventory["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + parse_and_emit_records(podInventory, serviceRecords, continuationToken, batchTime) else $log.warn "in_kube_podinventory::enumerate:Received empty podInventory" end end + @podInventoryE2EProcessingLatencyMs = ((Time.now.to_f * 1000).to_i - podInventoryStartTime) # Setting these to nil so that we dont hold memory until GC kicks in podInventory = nil - serviceList = nil + serviceRecords = nil # Adding telemetry to send pod telemetry every 5 minutes timeDifference = (DateTime.now.to_time.to_i - @@podTelemetryTimeTracker).abs @@ -123,14 +166,19 @@ def enumerate(podList = nil) if telemetryFlush == true telemetryProperties = {} telemetryProperties["Computer"] = @@hostName + telemetryProperties["PODS_CHUNK_SIZE"] = @PODS_CHUNK_SIZE + telemetryProperties["PODS_EMIT_STREAM_BATCH_SIZE"] = @PODS_EMIT_STREAM_BATCH_SIZE ApplicationInsightsUtility.sendCustomEvent("KubePodInventoryHeartBeatEvent", telemetryProperties) ApplicationInsightsUtility.sendMetricTelemetry("PodCount", @podCount, {}) + ApplicationInsightsUtility.sendMetricTelemetry("ServiceCount", @serviceCount, {}) telemetryProperties["ControllerData"] = @controllerData.to_json ApplicationInsightsUtility.sendMetricTelemetry("ControllerCount", @controllerSet.length, telemetryProperties) if @winContainerCount > 0 telemetryProperties["ClusterWideWindowsContainersCount"] = @winContainerCount ApplicationInsightsUtility.sendCustomEvent("WindowsContainerInventoryEvent", telemetryProperties) end + ApplicationInsightsUtility.sendMetricTelemetry("PodInventoryE2EProcessingLatencyMs", @podInventoryE2EProcessingLatencyMs, telemetryProperties) + ApplicationInsightsUtility.sendMetricTelemetry("PodsAPIE2ELatencyMs", @podsAPIE2ELatencyMs, telemetryProperties) @@podTelemetryTimeTracker 
= DateTime.now.to_time.to_i end rescue => errorStr @@ -138,260 +186,138 @@ def enumerate(podList = nil) $log.debug_backtrace(errorStr.backtrace) ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) end - end + end - def parse_and_emit_records(podInventory, serviceList, continuationToken, batchTime = Time.utc.iso8601) + def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batchTime = Time.utc.iso8601) currentTime = Time.now emitTime = currentTime.to_f #batchTime = currentTime.utc.iso8601 eventStream = MultiEventStream.new + kubePerfEventStream = MultiEventStream.new + insightsMetricsEventStream = MultiEventStream.new @@istestvar = ENV["ISTEST"] begin #begin block start # Getting windows nodes from kubeapi winNodes = KubernetesApiClient.getWindowsNodesArray - - podInventory["items"].each do |items| #podInventory block start - containerInventoryRecords = [] - records = [] - record = {} - record["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated - record["Name"] = items["metadata"]["name"] - podNameSpace = items["metadata"]["namespace"] - - # For ARO v3 cluster, skip the pods scheduled on to master or infra nodes - if KubernetesApiClient.isAROV3Cluster && !items["spec"].nil? && !items["spec"]["nodeName"].nil? && - (items["spec"]["nodeName"].downcase.start_with?("infra-") || - items["spec"]["nodeName"].downcase.start_with?("master-")) - next - end - - podUid = KubernetesApiClient.getPodUid(podNameSpace, items["metadata"]) - if podUid.nil? - next - end - record["PodUid"] = podUid - record["PodLabel"] = [items["metadata"]["labels"]] - record["Namespace"] = podNameSpace - record["PodCreationTimeStamp"] = items["metadata"]["creationTimestamp"] - #for unscheduled (non-started) pods startTime does NOT exist - if !items["status"]["startTime"].nil? 
- record["PodStartTime"] = items["status"]["startTime"] - else - record["PodStartTime"] = "" - end - #podStatus - # the below is for accounting 'NodeLost' scenario, where-in the pod(s) in the lost node is still being reported as running - podReadyCondition = true - if !items["status"]["reason"].nil? && items["status"]["reason"] == "NodeLost" && !items["status"]["conditions"].nil? - items["status"]["conditions"].each do |condition| - if condition["type"] == "Ready" && condition["status"] == "False" - podReadyCondition = false - break - end + podInventory["items"].each do |item| #podInventory block start + # pod inventory records + podInventoryRecords = getPodInventoryRecords(item, serviceRecords, batchTime) + podInventoryRecords.each do |record| + if !record.nil? + wrapper = { + "DataType" => "KUBE_POD_INVENTORY_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [record.each { |k, v| record[k] = v }], + } + eventStream.add(emitTime, wrapper) if wrapper + @inventoryToMdmConvertor.process_pod_inventory_record(wrapper) end end - - if podReadyCondition == false - record["PodStatus"] = "Unknown" - # ICM - https://portal.microsofticm.com/imp/v3/incidents/details/187091803/home - elsif !items["metadata"]["deletionTimestamp"].nil? && !items["metadata"]["deletionTimestamp"].empty? - record["PodStatus"] = Constants::POD_STATUS_TERMINATING - else - record["PodStatus"] = items["status"]["phase"] - end - #for unscheduled (non-started) pods podIP does NOT exist - if !items["status"]["podIP"].nil? - record["PodIp"] = items["status"]["podIP"] - else - record["PodIp"] = "" - end - #for unscheduled (non-started) pods nodeName does NOT exist - if !items["spec"]["nodeName"].nil? 
- record["Computer"] = items["spec"]["nodeName"] - else - record["Computer"] = "" - end - # Setting this flag to true so that we can send ContainerInventory records for containers # on windows nodes and parse environment variables for these containers if winNodes.length > 0 - if (!record["Computer"].empty? && (winNodes.include? record["Computer"])) + nodeName = "" + if !item["spec"]["nodeName"].nil? + nodeName = item["spec"]["nodeName"] + end + if (!nodeName.empty? && (winNodes.include? nodeName)) clusterCollectEnvironmentVar = ENV["AZMON_CLUSTER_COLLECT_ENV_VAR"] #Generate ContainerInventory records for windows nodes so that we can get image and image tag in property panel - containerInventoryRecordsInPodItem = KubernetesContainerInventory.getContainerInventoryRecords(items, batchTime, clusterCollectEnvironmentVar, true) - containerInventoryRecordsInPodItem.each do |containerRecord| - containerInventoryRecords.push(containerRecord) - end + containerInventoryRecords = KubernetesContainerInventory.getContainerInventoryRecords(item, batchTime, clusterCollectEnvironmentVar, true) + # Send container inventory records for containers on windows nodes + @winContainerCount += containerInventoryRecords.length + containerInventoryRecords.each do |cirecord| + if !cirecord.nil? + ciwrapper = { + "DataType" => "CONTAINER_INVENTORY_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [cirecord.each { |k, v| cirecord[k] = v }], + } + eventStream.add(emitTime, ciwrapper) if ciwrapper + end + end end end - record["ClusterId"] = KubernetesApiClient.getClusterId - record["ClusterName"] = KubernetesApiClient.getClusterName - record["ServiceName"] = getServiceNameFromLabels(items["metadata"]["namespace"], items["metadata"]["labels"], serviceList) - - if !items["metadata"]["ownerReferences"].nil? 
- record["ControllerKind"] = items["metadata"]["ownerReferences"][0]["kind"] - record["ControllerName"] = items["metadata"]["ownerReferences"][0]["name"] - @controllerSet.add(record["ControllerKind"] + record["ControllerName"]) - #Adding controller kind to telemetry ro information about customer workload - if (@controllerData[record["ControllerKind"]].nil?) - @controllerData[record["ControllerKind"]] = 1 - else - controllerValue = @controllerData[record["ControllerKind"]] - @controllerData[record["ControllerKind"]] += 1 + if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && eventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_podinventory::parse_and_emit_records: number of pod inventory records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubePodInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end + router.emit_stream(@tag, eventStream) if eventStream + eventStream = MultiEventStream.new end - podRestartCount = 0 - record["PodRestartCount"] = 0 - #Invoke the helper method to compute ready/not ready mdm metric - @inventoryToMdmConvertor.process_record_for_pods_ready_metric(record["ControllerName"], record["Namespace"], items["status"]["conditions"]) + #container perf records + containerMetricDataItems = [] + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "requests", "cpu", "cpuRequestNanoCores", batchTime)) + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "requests", "memory", "memoryRequestBytes", batchTime)) + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "cpu", "cpuLimitNanoCores", batchTime)) + containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(item, "limits", "memory", "memoryLimitBytes", batchTime)) - podContainers = [] - if 
items["status"].key?("containerStatuses") && !items["status"]["containerStatuses"].empty? - podContainers = podContainers + items["status"]["containerStatuses"] - end - # Adding init containers to the record list as well. - if items["status"].key?("initContainerStatuses") && !items["status"]["initContainerStatuses"].empty? - podContainers = podContainers + items["status"]["initContainerStatuses"] + containerMetricDataItems.each do |record| + record["DataType"] = "LINUX_PERF_BLOB" + record["IPName"] = "LogManagement" + kubePerfEventStream.add(emitTime, record) if record end - # if items["status"].key?("containerStatuses") && !items["status"]["containerStatuses"].empty? #container status block start - if !podContainers.empty? #container status block start - podContainers.each do |container| - containerRestartCount = 0 - lastFinishedTime = nil - # Need this flag to determine if we need to process container data for mdm metrics like oomkilled and container restart - #container Id is of the form - #docker://dfd9da983f1fd27432fb2c1fe3049c0a1d25b1c697b2dc1a530c986e58b16527 - if !container["containerID"].nil? - record["ContainerID"] = container["containerID"].split("//")[1] - else - # for containers that have image issues (like invalid image/tag etc..) this will be empty. do not make it all 0 - record["ContainerID"] = "" - end - #keeping this as which is same as InstanceName in perf table - if podUid.nil? || container["name"].nil? - next - else - record["ContainerName"] = podUid + "/" + container["name"] - end - #Pod restart count is a sumtotal of restart counts of individual containers - #within the pod. The restart count of a container is maintained by kubernetes - #itself in the form of a container label. 
- containerRestartCount = container["restartCount"] - record["ContainerRestartCount"] = containerRestartCount - - containerStatus = container["state"] - record["ContainerStatusReason"] = "" - # state is of the following form , so just picking up the first key name - # "state": { - # "waiting": { - # "reason": "CrashLoopBackOff", - # "message": "Back-off 5m0s restarting failed container=metrics-server pod=metrics-server-2011498749-3g453_kube-system(5953be5f-fcae-11e7-a356-000d3ae0e432)" - # } - # }, - # the below is for accounting 'NodeLost' scenario, where-in the containers in the lost node/pod(s) is still being reported as running - if podReadyCondition == false - record["ContainerStatus"] = "Unknown" - else - record["ContainerStatus"] = containerStatus.keys[0] - end - #TODO : Remove ContainerCreationTimeStamp from here since we are sending it as a metric - #Picking up both container and node start time from cAdvisor to be consistent - if containerStatus.keys[0] == "running" - record["ContainerCreationTimeStamp"] = container["state"]["running"]["startedAt"] - else - if !containerStatus[containerStatus.keys[0]]["reason"].nil? && !containerStatus[containerStatus.keys[0]]["reason"].empty? - record["ContainerStatusReason"] = containerStatus[containerStatus.keys[0]]["reason"] - end - # Process the record to see if job was completed 6 hours ago. If so, send metric to mdm - if !record["ControllerKind"].nil? && record["ControllerKind"].downcase == Constants::CONTROLLER_KIND_JOB - @inventoryToMdmConvertor.process_record_for_terminated_job_metric(record["ControllerName"], record["Namespace"], containerStatus) - end - end - - # Record the last state of the container. This may have information on why a container was killed. - begin - if !container["lastState"].nil? 
&& container["lastState"].keys.length == 1 - lastStateName = container["lastState"].keys[0] - lastStateObject = container["lastState"][lastStateName] - if !lastStateObject.is_a?(Hash) - raise "expected a hash object. This could signify a bug or a kubernetes API change" - end - - if lastStateObject.key?("reason") && lastStateObject.key?("startedAt") && lastStateObject.key?("finishedAt") - newRecord = Hash.new - newRecord["lastState"] = lastStateName # get the name of the last state (ex: terminated) - lastStateReason = lastStateObject["reason"] - # newRecord["reason"] = lastStateObject["reason"] # (ex: OOMKilled) - newRecord["reason"] = lastStateReason # (ex: OOMKilled) - newRecord["startedAt"] = lastStateObject["startedAt"] # (ex: 2019-07-02T14:58:51Z) - lastFinishedTime = lastStateObject["finishedAt"] - newRecord["finishedAt"] = lastFinishedTime # (ex: 2019-07-02T14:58:52Z) - - # only write to the output field if everything previously ran without error - record["ContainerLastStatus"] = newRecord - - #Populate mdm metric for OOMKilled container count if lastStateReason is OOMKilled - if lastStateReason.downcase == Constants::REASON_OOM_KILLED - @inventoryToMdmConvertor.process_record_for_oom_killed_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) - end - lastStateReason = nil - else - record["ContainerLastStatus"] = Hash.new - end - else - record["ContainerLastStatus"] = Hash.new - end - - #Populate mdm metric for container restart count if greater than 0 - if (!containerRestartCount.nil? && (containerRestartCount.is_a? 
Integer) && containerRestartCount > 0) - @inventoryToMdmConvertor.process_record_for_container_restarts_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) - end - rescue => errorStr - $log.warn "Failed in parse_and_emit_record pod inventory while processing ContainerLastStatus: #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - record["ContainerLastStatus"] = Hash.new - end + if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_podinventory::parse_and_emit_records: number of container perf records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + kubePerfEventStream = MultiEventStream.new + end - podRestartCount += containerRestartCount - records.push(record.dup) - end - else # for unscheduled pods there are no status.containerStatuses, in this case we still want the pod - records.push(record) - end #container status block end - records.each do |record| - if !record.nil? 
- record["PodRestartCount"] = podRestartCount - wrapper = { - "DataType" => "KUBE_POD_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [record.each { |k, v| record[k] = v }], - } - eventStream.add(emitTime, wrapper) if wrapper - @inventoryToMdmConvertor.process_pod_inventory_record(wrapper) - end + # container GPU records + containerGPUInsightsMetricsDataItems = [] + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "nvidia.com/gpu", "containerGpuRequests", batchTime)) + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "nvidia.com/gpu", "containerGpuLimits", batchTime)) + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "requests", "amd.com/gpu", "containerGpuRequests", batchTime)) + containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(item, "limits", "amd.com/gpu", "containerGpuLimits", batchTime)) + containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| + wrapper = { + "DataType" => "INSIGHTS_METRICS_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], + } + insightsMetricsEventStream.add(emitTime, wrapper) if wrapper end - # Send container inventory records for containers on windows nodes - @winContainerCount += containerInventoryRecords.length - containerInventoryRecords.each do |cirecord| - if !cirecord.nil? 
- ciwrapper = { - "DataType" => "CONTAINER_INVENTORY_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [cirecord.each { |k, v| cirecord[k] = v }], - } - eventStream.add(emitTime, ciwrapper) if ciwrapper + + if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && insightsMetricsEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_podinventory::parse_and_emit_records: number of GPU insights metrics records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + insightsMetricsEventStream = MultiEventStream.new end end #podInventory block end - router.emit_stream(@tag, eventStream) if eventStream + if eventStream.count > 0 + $log.info("in_kube_podinventory::parse_and_emit_records: number of pod inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@tag, eventStream) if eventStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubePodInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end + eventStream = nil + end + + if kubePerfEventStream.count > 0 + $log.info("in_kube_podinventory::parse_and_emit_records: number of perf records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + kubePerfEventStream = nil + end + + if insightsMetricsEventStream.count > 0 + $log.info("in_kube_podinventory::parse_and_emit_records: number of insights metrics records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + if (!@@istestvar.nil? 
&& !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end + insightsMetricsEventStream = nil + end - if continuationToken.nil? #no more chunks in this batch to be sent, get all pod inventory records to send + if continuationToken.nil? #no more chunks in this batch to be sent, get all mdm pod inventory records to send @log.info "Sending pod inventory mdm records to out_mdm" pod_inventory_mdm_records = @inventoryToMdmConvertor.get_pod_inventory_mdm_records(batchTime) @log.info "pod_inventory_mdm_records.size #{pod_inventory_mdm_records.size}" @@ -402,101 +328,36 @@ def parse_and_emit_records(podInventory, serviceList, continuationToken, batchTi router.emit_stream(@@MDMKubePodInventoryTag, mdm_pod_inventory_es) if mdm_pod_inventory_es end - #:optimize:kubeperf merge - begin - #if(!podInventory.empty?) - containerMetricDataItems = [] - #hostName = (OMS::Common.get_hostname) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(podInventory, "requests", "cpu", "cpuRequestNanoCores", batchTime)) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(podInventory, "requests", "memory", "memoryRequestBytes", batchTime)) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(podInventory, "limits", "cpu", "cpuLimitNanoCores", batchTime)) - containerMetricDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimits(podInventory, "limits", "memory", "memoryLimitBytes", batchTime)) - - kubePerfEventStream = MultiEventStream.new - insightsMetricsEventStream = MultiEventStream.new - - containerMetricDataItems.each do |record| - record["DataType"] = "LINUX_PERF_BLOB" - record["IPName"] = "LogManagement" - kubePerfEventStream.add(emitTime, record) if record - end - #end - router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream - - begin - #start 
GPU InsightsMetrics items - - containerGPUInsightsMetricsDataItems = [] - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(podInventory, "requests", "nvidia.com/gpu", "containerGpuRequests", batchTime)) - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(podInventory, "limits", "nvidia.com/gpu", "containerGpuLimits", batchTime)) - - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(podInventory, "requests", "amd.com/gpu", "containerGpuRequests", batchTime)) - containerGPUInsightsMetricsDataItems.concat(KubernetesApiClient.getContainerResourceRequestsAndLimitsAsInsightsMetrics(podInventory, "limits", "amd.com/gpu", "containerGpuLimits", batchTime)) - - containerGPUInsightsMetricsDataItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(emitTime, wrapper) if wrapper - - if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) - $log.info("kubePodInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - end - - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream - #end GPU InsightsMetrics items - rescue => errorStr - $log.warn "Failed when processing GPU metrics in_kube_podinventory : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end - rescue => errorStr - $log.warn "Failed in parse_and_emit_record for KubePerf from in_kube_podinventory : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) - end - #:optimize:end kubeperf merge - - #:optimize:start kubeservices merge - begin - if (!serviceList.nil? && !serviceList.empty?) - kubeServicesEventStream = MultiEventStream.new - serviceList["items"].each do |items| - kubeServiceRecord = {} - kubeServiceRecord["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated - kubeServiceRecord["ServiceName"] = items["metadata"]["name"] - kubeServiceRecord["Namespace"] = items["metadata"]["namespace"] - kubeServiceRecord["SelectorLabels"] = [items["spec"]["selector"]] + if continuationToken.nil? # sending kube services inventory records + kubeServicesEventStream = MultiEventStream.new + serviceRecords.each do |kubeServiceRecord| + if !kubeServiceRecord.nil? 
+ # adding before emit to reduce memory foot print kubeServiceRecord["ClusterId"] = KubernetesApiClient.getClusterId kubeServiceRecord["ClusterName"] = KubernetesApiClient.getClusterName - kubeServiceRecord["ClusterIP"] = items["spec"]["clusterIP"] - kubeServiceRecord["ServiceType"] = items["spec"]["type"] - # : Add ports and status fields kubeServicewrapper = { "DataType" => "KUBE_SERVICES_BLOB", "IPName" => "ContainerInsights", "DataItems" => [kubeServiceRecord.each { |k, v| kubeServiceRecord[k] = v }], } kubeServicesEventStream.add(emitTime, kubeServicewrapper) if kubeServicewrapper + if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubeServicesEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE + $log.info("in_kube_podinventory::parse_and_emit_records: number of service records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") + router.emit_stream(@@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream + kubeServicesEventStream = MultiEventStream.new + end end + end + + if kubeServicesEventStream.count > 0 + $log.info("in_kube_podinventory::parse_and_emit_records : number of service records emitted #{kubeServicesEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream end - rescue => errorStr - $log.warn "Failed in parse_and_emit_record for KubeServices from in_kube_podinventory : #{errorStr}" - $log.debug_backtrace(errorStr.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + kubeServicesEventStream = nil end - #:optimize:end kubeservices merge #Updating value for AppInsights telemetry @podCount += podInventory["items"].length - - if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && eventStream.count > 0) - $log.info("kubePodInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end rescue => errorStr $log.warn "Failed in parse_and_emit_record pod inventory: #{errorStr}" $log.debug_backtrace(errorStr.backtrace) @@ -536,25 +397,238 @@ def run_periodic @mutex.unlock end - def getServiceNameFromLabels(namespace, labels, serviceList) + # TODO - move this method to KubernetesClient or helper class + def getPodInventoryRecords(item, serviceRecords, batchTime = Time.utc.iso8601) + records = [] + record = {} + + begin + record["CollectionTime"] = batchTime #This is the time that is mapped to become TimeGenerated + record["Name"] = item["metadata"]["name"] + podNameSpace = item["metadata"]["namespace"] + podUid = KubernetesApiClient.getPodUid(podNameSpace, item["metadata"]) + if podUid.nil? + return records + end + + nodeName = "" + #for unscheduled (non-started) pods nodeName does NOT exist + if !item["spec"]["nodeName"].nil? + nodeName = item["spec"]["nodeName"] + end + # For ARO v3 cluster, skip the pods scheduled on to master or infra nodes + if KubernetesApiClient.isAROv3MasterOrInfraPod(nodeName) + return records + end + + record["PodUid"] = podUid + record["PodLabel"] = [item["metadata"]["labels"]] + record["Namespace"] = podNameSpace + record["PodCreationTimeStamp"] = item["metadata"]["creationTimestamp"] + #for unscheduled (non-started) pods startTime does NOT exist + if !item["status"]["startTime"].nil? + record["PodStartTime"] = item["status"]["startTime"] + else + record["PodStartTime"] = "" + end + #podStatus + # the below is for accounting 'NodeLost' scenario, where-in the pod(s) in the lost node is still being reported as running + podReadyCondition = true + if !item["status"]["reason"].nil? && item["status"]["reason"] == "NodeLost" && !item["status"]["conditions"].nil? 
+ item["status"]["conditions"].each do |condition| + if condition["type"] == "Ready" && condition["status"] == "False" + podReadyCondition = false + break + end + end + end + if podReadyCondition == false + record["PodStatus"] = "Unknown" + # ICM - https://portal.microsofticm.com/imp/v3/incidents/details/187091803/home + elsif !item["metadata"]["deletionTimestamp"].nil? && !item["metadata"]["deletionTimestamp"].empty? + record["PodStatus"] = Constants::POD_STATUS_TERMINATING + else + record["PodStatus"] = item["status"]["phase"] + end + #for unscheduled (non-started) pods podIP does NOT exist + if !item["status"]["podIP"].nil? + record["PodIp"] = item["status"]["podIP"] + else + record["PodIp"] = "" + end + + record["Computer"] = nodeName + record["ClusterId"] = KubernetesApiClient.getClusterId + record["ClusterName"] = KubernetesApiClient.getClusterName + record["ServiceName"] = getServiceNameFromLabels(item["metadata"]["namespace"], item["metadata"]["labels"], serviceRecords) + + if !item["metadata"]["ownerReferences"].nil? + record["ControllerKind"] = item["metadata"]["ownerReferences"][0]["kind"] + record["ControllerName"] = item["metadata"]["ownerReferences"][0]["name"] + @controllerSet.add(record["ControllerKind"] + record["ControllerName"]) + #Adding controller kind to telemetry ro information about customer workload + if (@controllerData[record["ControllerKind"]].nil?) + @controllerData[record["ControllerKind"]] = 1 + else + controllerValue = @controllerData[record["ControllerKind"]] + @controllerData[record["ControllerKind"]] += 1 + end + end + podRestartCount = 0 + record["PodRestartCount"] = 0 + + #Invoke the helper method to compute ready/not ready mdm metric + @inventoryToMdmConvertor.process_record_for_pods_ready_metric(record["ControllerName"], record["Namespace"], item["status"]["conditions"]) + + podContainers = [] + if item["status"].key?("containerStatuses") && !item["status"]["containerStatuses"].empty? 
+ podContainers = podContainers + item["status"]["containerStatuses"] + end + # Adding init containers to the record list as well. + if item["status"].key?("initContainerStatuses") && !item["status"]["initContainerStatuses"].empty? + podContainers = podContainers + item["status"]["initContainerStatuses"] + end + # if items["status"].key?("containerStatuses") && !items["status"]["containerStatuses"].empty? #container status block start + if !podContainers.empty? #container status block start + podContainers.each do |container| + containerRestartCount = 0 + lastFinishedTime = nil + # Need this flag to determine if we need to process container data for mdm metrics like oomkilled and container restart + #container Id is of the form + #docker://dfd9da983f1fd27432fb2c1fe3049c0a1d25b1c697b2dc1a530c986e58b16527 + if !container["containerID"].nil? + record["ContainerID"] = container["containerID"].split("//")[1] + else + # for containers that have image issues (like invalid image/tag etc..) this will be empty. do not make it all 0 + record["ContainerID"] = "" + end + #keeping this as which is same as InstanceName in perf table + if podUid.nil? || container["name"].nil? + next + else + record["ContainerName"] = podUid + "/" + container["name"] + end + #Pod restart count is a sumtotal of restart counts of individual containers + #within the pod. The restart count of a container is maintained by kubernetes + #itself in the form of a container label. 
+ containerRestartCount = container["restartCount"] + record["ContainerRestartCount"] = containerRestartCount + + containerStatus = container["state"] + record["ContainerStatusReason"] = "" + # state is of the following form , so just picking up the first key name + # "state": { + # "waiting": { + # "reason": "CrashLoopBackOff", + # "message": "Back-off 5m0s restarting failed container=metrics-server pod=metrics-server-2011498749-3g453_kube-system(5953be5f-fcae-11e7-a356-000d3ae0e432)" + # } + # }, + # the below is for accounting 'NodeLost' scenario, where-in the containers in the lost node/pod(s) is still being reported as running + if podReadyCondition == false + record["ContainerStatus"] = "Unknown" + else + record["ContainerStatus"] = containerStatus.keys[0] + end + #TODO : Remove ContainerCreationTimeStamp from here since we are sending it as a metric + #Picking up both container and node start time from cAdvisor to be consistent + if containerStatus.keys[0] == "running" + record["ContainerCreationTimeStamp"] = container["state"]["running"]["startedAt"] + else + if !containerStatus[containerStatus.keys[0]]["reason"].nil? && !containerStatus[containerStatus.keys[0]]["reason"].empty? + record["ContainerStatusReason"] = containerStatus[containerStatus.keys[0]]["reason"] + end + # Process the record to see if job was completed 6 hours ago. If so, send metric to mdm + if !record["ControllerKind"].nil? && record["ControllerKind"].downcase == Constants::CONTROLLER_KIND_JOB + @inventoryToMdmConvertor.process_record_for_terminated_job_metric(record["ControllerName"], record["Namespace"], containerStatus) + end + end + + # Record the last state of the container. This may have information on why a container was killed. + begin + if !container["lastState"].nil? 
&& container["lastState"].keys.length == 1 + lastStateName = container["lastState"].keys[0] + lastStateObject = container["lastState"][lastStateName] + if !lastStateObject.is_a?(Hash) + raise "expected a hash object. This could signify a bug or a kubernetes API change" + end + + if lastStateObject.key?("reason") && lastStateObject.key?("startedAt") && lastStateObject.key?("finishedAt") + newRecord = Hash.new + newRecord["lastState"] = lastStateName # get the name of the last state (ex: terminated) + lastStateReason = lastStateObject["reason"] + # newRecord["reason"] = lastStateObject["reason"] # (ex: OOMKilled) + newRecord["reason"] = lastStateReason # (ex: OOMKilled) + newRecord["startedAt"] = lastStateObject["startedAt"] # (ex: 2019-07-02T14:58:51Z) + lastFinishedTime = lastStateObject["finishedAt"] + newRecord["finishedAt"] = lastFinishedTime # (ex: 2019-07-02T14:58:52Z) + + # only write to the output field if everything previously ran without error + record["ContainerLastStatus"] = newRecord + + #Populate mdm metric for OOMKilled container count if lastStateReason is OOMKilled + if lastStateReason.downcase == Constants::REASON_OOM_KILLED + @inventoryToMdmConvertor.process_record_for_oom_killed_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) + end + lastStateReason = nil + else + record["ContainerLastStatus"] = Hash.new + end + else + record["ContainerLastStatus"] = Hash.new + end + + #Populate mdm metric for container restart count if greater than 0 + if (!containerRestartCount.nil? && (containerRestartCount.is_a? 
Integer) && containerRestartCount > 0) + @inventoryToMdmConvertor.process_record_for_container_restarts_metric(record["ControllerName"], record["Namespace"], lastFinishedTime) + end + rescue => errorStr + $log.warn "Failed in parse_and_emit_record pod inventory while processing ContainerLastStatus: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + record["ContainerLastStatus"] = Hash.new + end + + podRestartCount += containerRestartCount + records.push(record.dup) + end + else # for unscheduled pods there are no status.containerStatuses, in this case we still want the pod + records.push(record) + end #container status block end + + records.each do |record| + if !record.nil? + record["PodRestartCount"] = podRestartCount + end + end + rescue => error + $log.warn("getPodInventoryRecords failed: #{error}") + end + return records + end + + # TODO - move this method to KubernetesClient or helper class + def getServiceNameFromLabels(namespace, labels, serviceRecords) serviceName = "" begin if !labels.nil? && !labels.empty? - if (!serviceList.nil? && !serviceList.empty? && serviceList.key?("items") && !serviceList["items"].empty?) - serviceList["items"].each do |item| - found = 0 - if !item["spec"].nil? && !item["spec"]["selector"].nil? && item["metadata"]["namespace"] == namespace - selectorLabels = item["spec"]["selector"] - if !selectorLabels.empty? - selectorLabels.each do |key, value| - if !(labels.select { |k, v| k == key && v == value }.length > 0) - break - end - found = found + 1 + serviceRecords.each do |kubeServiceRecord| + found = 0 + if kubeServiceRecord["Namespace"] == namespace + selectorLabels = {} + # selector labels wrapped in array in kube service records so unwrapping here + if !kubeServiceRecord["SelectorLabels"].nil? && kubeServiceRecord["SelectorLabels"].length > 0 + selectorLabels = kubeServiceRecord["SelectorLabels"][0] + end + if !selectorLabels.nil? && !selectorLabels.empty? 
+ selectorLabels.each do |key, value| + if !(labels.select { |k, v| k == key && v == value }.length > 0) + break end + found = found + 1 end + # service can have no selectors if found == selectorLabels.length - return item["metadata"]["name"] + return kubeServiceRecord["ServiceName"] end end end diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb new file mode 100644 index 000000000..861b3a8e1 --- /dev/null +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -0,0 +1,253 @@ +module Fluent + class Kube_PVInventory_Input < Input + Plugin.register_input("kubepvinventory", self) + + @@hostName = (OMS::Common.get_hostname) + + def initialize + super + require "yaml" + require "yajl/json_gem" + require "yajl" + require "time" + require_relative "KubernetesApiClient" + require_relative "ApplicationInsightsUtility" + require_relative "oms_common" + require_relative "omslog" + require_relative "constants" + + # Response size is around 1500 bytes per PV + @PV_CHUNK_SIZE = "5000" + @pvTypeToCountHash = {} + end + + config_param :run_interval, :time, :default => 60 + config_param :tag, :string, :default => "oms.containerinsights.KubePVInventory" + + def configure(conf) + super + end + + def start + if @run_interval + @finished = false + @condition = ConditionVariable.new + @mutex = Mutex.new + @thread = Thread.new(&method(:run_periodic)) + @@pvTelemetryTimeTracker = DateTime.now.to_time.to_i + end + end + + def shutdown + if @run_interval + @mutex.synchronize { + @finished = true + @condition.signal + } + @thread.join + end + end + + def enumerate + begin + pvInventory = nil + telemetryFlush = false + @pvTypeToCountHash = {} + currentTime = Time.now + batchTime = currentTime.utc.iso8601 + + continuationToken = nil + $log.info("in_kube_pvinventory::enumerate : Getting PVs from Kube API @ #{Time.now.utc.iso8601}") + continuationToken, pvInventory = 
KubernetesApiClient.getResourcesAndContinuationToken("persistentvolumes?limit=#{@PV_CHUNK_SIZE}") + $log.info("in_kube_pvinventory::enumerate : Done getting PVs from Kube API @ #{Time.now.utc.iso8601}") + + if (!pvInventory.nil? && !pvInventory.empty? && pvInventory.key?("items") && !pvInventory["items"].nil? && !pvInventory["items"].empty?) + parse_and_emit_records(pvInventory, batchTime) + else + $log.warn "in_kube_pvinventory::enumerate:Received empty pvInventory" + end + + # If we receive a continuation token, make calls, process and flush data until we have processed all data + while (!continuationToken.nil? && !continuationToken.empty?) + continuationToken, pvInventory = KubernetesApiClient.getResourcesAndContinuationToken("persistentvolumes?limit=#{@PV_CHUNK_SIZE}&continue=#{continuationToken}") + if (!pvInventory.nil? && !pvInventory.empty? && pvInventory.key?("items") && !pvInventory["items"].nil? && !pvInventory["items"].empty?) + parse_and_emit_records(pvInventory, batchTime) + else + $log.warn "in_kube_pvinventory::enumerate:Received empty pvInventory" + end + end + + # Setting this to nil so that we dont hold memory until GC kicks in + pvInventory = nil + + # Adding telemetry to send pod telemetry every 10 minutes + timeDifference = (DateTime.now.to_time.to_i - @@pvTelemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + telemetryFlush = true + end + + # Flush AppInsights telemetry once all the processing is done + if telemetryFlush == true + telemetryProperties = {} + telemetryProperties["CountsOfPVTypes"] = @pvTypeToCountHash.to_json + ApplicationInsightsUtility.sendCustomEvent(Constants::PV_INVENTORY_HEART_BEAT_EVENT, telemetryProperties) + @@pvTelemetryTimeTracker = DateTime.now.to_time.to_i + end + + rescue => errorStr + $log.warn "in_kube_pvinventory::enumerate:Failed in enumerate: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + 
ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end # end enumerate + + def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) + currentTime = Time.now + emitTime = currentTime.to_f + eventStream = MultiEventStream.new + + begin + records = [] + pvInventory["items"].each do |item| + + # Node, pod, & usage info can be found by joining with pvUsedBytes metric using PVCNamespace/PVCName + record = {} + record["CollectionTime"] = batchTime + record["ClusterId"] = KubernetesApiClient.getClusterId + record["ClusterName"] = KubernetesApiClient.getClusterName + record["PVName"] = item["metadata"]["name"] + record["PVStatus"] = item["status"]["phase"] + record["PVAccessModes"] = item["spec"]["accessModes"].join(', ') + record["PVStorageClassName"] = item["spec"]["storageClassName"] + record["PVCapacityBytes"] = KubernetesApiClient.getMetricNumericValue("memory", item["spec"]["capacity"]["storage"]) + record["PVCreationTimeStamp"] = item["metadata"]["creationTimestamp"] + + # Optional values + pvcNamespace, pvcName = getPVCInfo(item) + type, typeInfo = getTypeInfo(item) + record["PVCNamespace"] = pvcNamespace + record["PVCName"] = pvcName + record["PVType"] = type + record["PVTypeInfo"] = typeInfo + + records.push(record) + + # Record telemetry + if type == nil + type = "empty" + end + if (@pvTypeToCountHash.has_key? type) + @pvTypeToCountHash[type] += 1 + else + @pvTypeToCountHash[type] = 1 + end + end + + records.each do |record| + if !record.nil? 
+ wrapper = { + "DataType" => "KUBE_PV_INVENTORY_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [record.each { |k, v| record[k] = v }], + } + eventStream.add(emitTime, wrapper) if wrapper + end + end + + router.emit_stream(@tag, eventStream) if eventStream + + rescue => errorStr + $log.warn "Failed in parse_and_emit_record for in_kube_pvinventory: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end + + def getPVCInfo(item) + begin + if !item["spec"].nil? && !item["spec"]["claimRef"].nil? + claimRef = item["spec"]["claimRef"] + pvcNamespace = claimRef["namespace"] + pvcName = claimRef["name"] + return pvcNamespace, pvcName + end + rescue => errorStr + $log.warn "Failed in getPVCInfo for in_kube_pvinventory: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + + # No PVC or an error + return nil, nil + end + + def getTypeInfo(item) + begin + if !item["spec"].nil? + (Constants::PV_TYPES).each do |pvType| + + # PV is this type + if !item["spec"][pvType].nil? 
+ + # Get additional info if azure disk/file + typeInfo = {} + if pvType == "azureDisk" + azureDisk = item["spec"]["azureDisk"] + typeInfo["DiskName"] = azureDisk["diskName"] + typeInfo["DiskUri"] = azureDisk["diskURI"] + elsif pvType == "azureFile" + typeInfo["FileShareName"] = item["spec"]["azureFile"]["shareName"] + end + + # Can only have one type: return right away when found + return pvType, typeInfo + + end + end + end + rescue => errorStr + $log.warn "Failed in getTypeInfo for in_kube_pvinventory: #{errorStr}" + $log.debug_backtrace(errorStr.backtrace) + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + + # No matches from list of types or an error + return nil, {} + end + + + def run_periodic + @mutex.lock + done = @finished + @nextTimeToRun = Time.now + @waitTimeout = @run_interval + until done + @nextTimeToRun = @nextTimeToRun + @run_interval + @now = Time.now + if @nextTimeToRun <= @now + @waitTimeout = 1 + @nextTimeToRun = @now + else + @waitTimeout = @nextTimeToRun - @now + end + @condition.wait(@mutex, @waitTimeout) + done = @finished + @mutex.unlock + if !done + begin + $log.info("in_kube_pvinventory::run_periodic.enumerate.start #{Time.now.utc.iso8601}") + enumerate + $log.info("in_kube_pvinventory::run_periodic.enumerate.end #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn "in_kube_pvinventory::run_periodic: enumerate Failed to retrieve pod inventory: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) + end + end + @mutex.lock + end + @mutex.unlock + end + + end # Kube_PVInventory_Input +end # module \ No newline at end of file diff --git a/source/plugins/ruby/in_kubestate_deployments.rb b/source/plugins/ruby/in_kubestate_deployments.rb index bcf397150..27e4709a2 100644 --- a/source/plugins/ruby/in_kubestate_deployments.rb +++ b/source/plugins/ruby/in_kubestate_deployments.rb @@ -2,230 +2,238 @@ # frozen_string_literal: true module Fluent - class Kube_Kubestate_Deployments_Input < Input - 
Plugin.register_input("kubestatedeployments", self) - @@istestvar = ENV["ISTEST"] - # telemetry - To keep telemetry cost reasonable, we keep track of the max deployments over a period of 15m - @@deploymentsCount = 0 - - - - def initialize - super - require "yajl/json_gem" - require "yajl" - require "date" - require "time" - - require_relative "KubernetesApiClient" - require_relative "oms_common" - require_relative "omslog" - require_relative "ApplicationInsightsUtility" - require_relative "constants" - - # roughly each deployment is 8k - # 1000 deployments account to approximately 8MB - @DEPLOYMENTS_CHUNK_SIZE = 1000 - @DEPLOYMENTS_API_GROUP = "apps" - @@telemetryLastSentTime = DateTime.now.to_time.to_i - - - @deploymentsRunningTotal = 0 - - @NodeName = OMS::Common.get_hostname - @ClusterId = KubernetesApiClient.getClusterId - @ClusterName = KubernetesApiClient.getClusterName - end - - config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => Constants::INSIGHTSMETRICS_FLUENT_TAG - - def configure(conf) - super - end - - def start - if @run_interval - @finished = false - @condition = ConditionVariable.new - @mutex = Mutex.new - @thread = Thread.new(&method(:run_periodic)) + class Kube_Kubestate_Deployments_Input < Input + Plugin.register_input("kubestatedeployments", self) + @@istestvar = ENV["ISTEST"] + # telemetry - To keep telemetry cost reasonable, we keep track of the max deployments over a period of 15m + @@deploymentsCount = 0 + + def initialize + super + require "yajl/json_gem" + require "yajl" + require "date" + require "time" + + require_relative "KubernetesApiClient" + require_relative "oms_common" + require_relative "omslog" + require_relative "ApplicationInsightsUtility" + require_relative "constants" + + # refer tomlparser-agent-config for defaults + # this configurable via configmap + @DEPLOYMENTS_CHUNK_SIZE = 0 + + @DEPLOYMENTS_API_GROUP = "apps" + @@telemetryLastSentTime = DateTime.now.to_time.to_i + + 
@deploymentsRunningTotal = 0 + + @NodeName = OMS::Common.get_hostname + @ClusterId = KubernetesApiClient.getClusterId + @ClusterName = KubernetesApiClient.getClusterName + end + + config_param :run_interval, :time, :default => 60 + config_param :tag, :string, :default => Constants::INSIGHTSMETRICS_FLUENT_TAG + + def configure(conf) + super + end + + def start + if @run_interval + if !ENV["DEPLOYMENTS_CHUNK_SIZE"].nil? && !ENV["DEPLOYMENTS_CHUNK_SIZE"].empty? && ENV["DEPLOYMENTS_CHUNK_SIZE"].to_i > 0 + @DEPLOYMENTS_CHUNK_SIZE = ENV["DEPLOYMENTS_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kubestate_deployments::start: setting to default value since got DEPLOYMENTS_CHUNK_SIZE nil or empty") + @DEPLOYMENTS_CHUNK_SIZE = 500 end + $log.info("in_kubestate_deployments::start : DEPLOYMENTS_CHUNK_SIZE @ #{@DEPLOYMENTS_CHUNK_SIZE}") + + @finished = false + @condition = ConditionVariable.new + @mutex = Mutex.new + @thread = Thread.new(&method(:run_periodic)) end - - def shutdown - if @run_interval - @mutex.synchronize { - @finished = true - @condition.signal - } - @thread.join - end + end + + def shutdown + if @run_interval + @mutex.synchronize { + @finished = true + @condition.signal + } + @thread.join end - - def enumerate - begin - deploymentList = nil - currentTime = Time.now - batchTime = currentTime.utc.iso8601 - - #set the running total for this batch to 0 - @deploymentsRunningTotal = 0 - - # Initializing continuation token to nil - continuationToken = nil - $log.info("in_kubestate_deployments::enumerate : Getting deployments from Kube API @ #{Time.now.utc.iso8601}") - continuationToken, deploymentList = KubernetesApiClient.getResourcesAndContinuationToken("deployments?limit=#{@DEPLOYMENTS_CHUNK_SIZE}", api_group: @DEPLOYMENTS_API_GROUP) - $log.info("in_kubestate_deployments::enumerate : Done getting deployments from Kube API @ #{Time.now.utc.iso8601}") + end + + def enumerate + begin + deploymentList = nil + 
currentTime = Time.now + batchTime = currentTime.utc.iso8601 + + #set the running total for this batch to 0 + @deploymentsRunningTotal = 0 + + # Initializing continuation token to nil + continuationToken = nil + $log.info("in_kubestate_deployments::enumerate : Getting deployments from Kube API @ #{Time.now.utc.iso8601}") + continuationToken, deploymentList = KubernetesApiClient.getResourcesAndContinuationToken("deployments?limit=#{@DEPLOYMENTS_CHUNK_SIZE}", api_group: @DEPLOYMENTS_API_GROUP) + $log.info("in_kubestate_deployments::enumerate : Done getting deployments from Kube API @ #{Time.now.utc.iso8601}") + if (!deploymentList.nil? && !deploymentList.empty? && deploymentList.key?("items") && !deploymentList["items"].nil? && !deploymentList["items"].empty?) + $log.info("in_kubestate_deployments::enumerate : number of deployment items :#{deploymentList["items"].length} from Kube API @ #{Time.now.utc.iso8601}") + parse_and_emit_records(deploymentList, batchTime) + else + $log.warn "in_kubestate_deployments::enumerate:Received empty deploymentList" + end + + #If we receive a continuation token, make calls, process and flush data until we have processed all data + while (!continuationToken.nil? && !continuationToken.empty?) + continuationToken, deploymentList = KubernetesApiClient.getResourcesAndContinuationToken("deployments?limit=#{@DEPLOYMENTS_CHUNK_SIZE}&continue=#{continuationToken}", api_group: @DEPLOYMENTS_API_GROUP) if (!deploymentList.nil? && !deploymentList.empty? && deploymentList.key?("items") && !deploymentList["items"].nil? && !deploymentList["items"].empty?) 
+ $log.info("in_kubestate_deployments::enumerate : number of deployment items :#{deploymentList["items"].length} from Kube API @ #{Time.now.utc.iso8601}") parse_and_emit_records(deploymentList, batchTime) else $log.warn "in_kubestate_deployments::enumerate:Received empty deploymentList" end - - #If we receive a continuation token, make calls, process and flush data until we have processed all data - while (!continuationToken.nil? && !continuationToken.empty?) - continuationToken, deploymentList = KubernetesApiClient.getResourcesAndContinuationToken("deployments?limit=#{@DEPLOYMENTS_CHUNK_SIZE}&continue=#{continuationToken}", api_group: @DEPLOYMENTS_API_GROUP) - if (!deploymentList.nil? && !deploymentList.empty? && deploymentList.key?("items") && !deploymentList["items"].nil? && !deploymentList["items"].empty?) - parse_and_emit_records(deploymentList, batchTime) - else - $log.warn "in_kubestate_deployments::enumerate:Received empty deploymentList" - end + end + + # Setting this to nil so that we dont hold memory until GC kicks in + deploymentList = nil + + $log.info("successfully emitted a total of #{@deploymentsRunningTotal} kube_state_deployment metrics") + # Flush AppInsights telemetry once all the processing is done, only if the number of events flushed is greater than 0 + if (@deploymentsRunningTotal > @@deploymentsCount) + @@deploymentsCount = @deploymentsRunningTotal + end + if (((DateTime.now.to_time.to_i - @@telemetryLastSentTime).abs) / 60) >= Constants::KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES + #send telemetry + $log.info "sending deployemt telemetry..." 
+ ApplicationInsightsUtility.sendMetricTelemetry("MaxDeploymentCount", @@deploymentsCount, {}) + #reset last sent value & time + @@deploymentsCount = 0 + @@telemetryLastSentTime = DateTime.now.to_time.to_i + end + rescue => errorStr + $log.warn "in_kubestate_deployments::enumerate:Failed in enumerate: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::enumerate:Failed in enumerate: #{errorStr}") + end + end # end enumerate + + def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) + metricItems = [] + insightsMetricsEventStream = MultiEventStream.new + begin + metricInfo = deployments + metricInfo["items"].each do |deployment| + deploymentName = deployment["metadata"]["name"] + deploymentNameSpace = deployment["metadata"]["namespace"] + deploymentCreatedTime = "" + if !deployment["metadata"]["creationTimestamp"].nil? + deploymentCreatedTime = deployment["metadata"]["creationTimestamp"] + end + deploymentStrategy = "RollingUpdate" #default when not specified as per spec + if !deployment["spec"]["strategy"].nil? && !deployment["spec"]["strategy"]["type"].nil? + deploymentStrategy = deployment["spec"]["strategy"]["type"] end - - # Setting this to nil so that we dont hold memory until GC kicks in - deploymentList = nil - - $log.info("successfully emitted a total of #{@deploymentsRunningTotal} kube_state_deployment metrics") - # Flush AppInsights telemetry once all the processing is done, only if the number of events flushed is greater than 0 - if (@deploymentsRunningTotal > @@deploymentsCount) - @@deploymentsCount = @deploymentsRunningTotal + deploymentSpecReplicas = 1 #default is 1 as per k8s spec + if !deployment["spec"]["replicas"].nil? + deploymentSpecReplicas = deployment["spec"]["replicas"] end - if (((DateTime.now.to_time.to_i - @@telemetryLastSentTime).abs)/60 ) >= Constants::KUBE_STATE_TELEMETRY_FLUSH_INTERVAL_IN_MINUTES - #send telemetry - $log.info "sending deployemt telemetry..." 
- ApplicationInsightsUtility.sendMetricTelemetry("MaxDeploymentCount", @@deploymentsCount, {}) - #reset last sent value & time - @@deploymentsCount = 0 - @@telemetryLastSentTime = DateTime.now.to_time.to_i + deploymentStatusReadyReplicas = 0 + if !deployment["status"]["readyReplicas"].nil? + deploymentStatusReadyReplicas = deployment["status"]["readyReplicas"] end - rescue => errorStr - $log.warn "in_kubestate_deployments::enumerate:Failed in enumerate: #{errorStr}" - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::enumerate:Failed in enumerate: #{errorStr}") + deploymentStatusUpToDateReplicas = 0 + if !deployment["status"]["updatedReplicas"].nil? + deploymentStatusUpToDateReplicas = deployment["status"]["updatedReplicas"] + end + deploymentStatusAvailableReplicas = 0 + if !deployment["status"]["availableReplicas"].nil? + deploymentStatusAvailableReplicas = deployment["status"]["availableReplicas"] + end + + metricItem = {} + metricItem["CollectionTime"] = batchTime + metricItem["Computer"] = @NodeName + metricItem["Name"] = Constants::INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_DEPLOYMENT_STATE + metricItem["Value"] = deploymentStatusReadyReplicas + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE + + metricTags = {} + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = @ClusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = @ClusterName + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_NAME] = deploymentName + metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = deploymentNameSpace + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STRATEGY] = deploymentStrategy + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME] = deploymentCreatedTime + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_SPEC_REPLICAS] = deploymentSpecReplicas + 
metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_UPDATED] = deploymentStatusUpToDateReplicas + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_AVAILABLE] = deploymentStatusAvailableReplicas + + metricItem["Tags"] = metricTags + + metricItems.push(metricItem) end - end # end enumerate - - def parse_and_emit_records(deployments, batchTime = Time.utc.iso8601) - metricItems = [] - insightsMetricsEventStream = MultiEventStream.new - begin - metricInfo = deployments - metricInfo["items"].each do |deployment| - deploymentName = deployment["metadata"]["name"] - deploymentNameSpace = deployment["metadata"]["namespace"] - deploymentCreatedTime = "" - if !deployment["metadata"]["creationTimestamp"].nil? - deploymentCreatedTime = deployment["metadata"]["creationTimestamp"] - end - deploymentStrategy = "RollingUpdate" #default when not specified as per spec - if !deployment["spec"]["strategy"].nil? && !deployment["spec"]["strategy"]["type"].nil? - deploymentStrategy = deployment["spec"]["strategy"]["type"] - end - deploymentSpecReplicas = 1 #default is 1 as per k8s spec - if !deployment["spec"]["replicas"].nil? - deploymentSpecReplicas = deployment["spec"]["replicas"] - end - deploymentStatusReadyReplicas = 0 - if !deployment["status"]["readyReplicas"].nil? - deploymentStatusReadyReplicas = deployment["status"]["readyReplicas"] - end - deploymentStatusUpToDateReplicas = 0 - if !deployment["status"]["updatedReplicas"].nil? - deploymentStatusUpToDateReplicas = deployment["status"]["updatedReplicas"] - end - deploymentStatusAvailableReplicas = 0 - if !deployment["status"]["availableReplicas"].nil? 
- deploymentStatusAvailableReplicas = deployment["status"]["availableReplicas"] - end - - metricItem = {} - metricItem["CollectionTime"] = batchTime - metricItem["Computer"] = @NodeName - metricItem["Name"] = Constants::INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_DEPLOYMENT_STATE - metricItem["Value"] = deploymentStatusReadyReplicas - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN - metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE - - metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = @ClusterId - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = @ClusterName - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_NAME] = deploymentName - metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = deploymentNameSpace - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STRATEGY ] = deploymentStrategy - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME] = deploymentCreatedTime - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_SPEC_REPLICAS] = deploymentSpecReplicas - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_UPDATED] = deploymentStatusUpToDateReplicas - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_DEPLOYMENT_STATUS_REPLICAS_AVAILABLE] = deploymentStatusAvailableReplicas - - - metricItem["Tags"] = metricTags - - metricItems.push(metricItem) - end - - time = Time.now.to_f - metricItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(time, wrapper) if wrapper - end - - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream - $log.info("successfully emitted #{metricItems.length()} kube_state_deployment metrics") - @deploymentsRunningTotal = 
@deploymentsRunningTotal + metricItems.length() - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) - $log.info("kubestatedeploymentsInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - rescue => error - $log.warn("in_kubestate_deployments::parse_and_emit_records failed: #{error} ") - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::parse_and_emit_records failed: #{error}") + + time = Time.now.to_f + metricItems.each do |insightsMetricsRecord| + wrapper = { + "DataType" => "INSIGHTS_METRICS_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], + } + insightsMetricsEventStream.add(time, wrapper) if wrapper + end + + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + $log.info("successfully emitted #{metricItems.length()} kube_state_deployment metrics") + + @deploymentsRunningTotal = @deploymentsRunningTotal + metricItems.length() + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) + $log.info("kubestatedeploymentsInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - + rescue => error + $log.warn("in_kubestate_deployments::parse_and_emit_records failed: #{error} ") + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::parse_and_emit_records failed: #{error}") end - - def run_periodic - @mutex.lock + end + + def run_periodic + @mutex.lock + done = @finished + @nextTimeToRun = Time.now + @waitTimeout = @run_interval + until done + @nextTimeToRun = @nextTimeToRun + @run_interval + @now = Time.now + if @nextTimeToRun <= @now + @waitTimeout = 1 + @nextTimeToRun = @now + else + @waitTimeout = @nextTimeToRun - @now + end + @condition.wait(@mutex, @waitTimeout) done = @finished - @nextTimeToRun = Time.now - @waitTimeout = @run_interval - until done - @nextTimeToRun = @nextTimeToRun + @run_interval - @now = Time.now - if @nextTimeToRun <= @now - @waitTimeout = 1 - @nextTimeToRun = @now - else - @waitTimeout = @nextTimeToRun - @now - end - @condition.wait(@mutex, @waitTimeout) - done = @finished - @mutex.unlock - if !done - begin - $log.info("in_kubestate_deployments::run_periodic.enumerate.start @ #{Time.now.utc.iso8601}") - enumerate - $log.info("in_kubestate_deployments::run_periodic.enumerate.end @ #{Time.now.utc.iso8601}") - rescue => errorStr - $log.warn "in_kubestate_deployments::run_periodic: enumerate Failed to retrieve kube deployments: #{errorStr}" - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::run_periodic: enumerate Failed to retrieve kube deployments: #{errorStr}") - end + @mutex.unlock + if !done + begin + $log.info("in_kubestate_deployments::run_periodic.enumerate.start @ #{Time.now.utc.iso8601}") + enumerate + $log.info("in_kubestate_deployments::run_periodic.enumerate.end @ #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn "in_kubestate_deployments::run_periodic: enumerate Failed 
to retrieve kube deployments: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_deployments::run_periodic: enumerate Failed to retrieve kube deployments: #{errorStr}") end - @mutex.lock end - @mutex.unlock + @mutex.lock end + @mutex.unlock end -end \ No newline at end of file + end +end diff --git a/source/plugins/ruby/in_kubestate_hpa.rb b/source/plugins/ruby/in_kubestate_hpa.rb index 3ce63a75a..afecf8e3b 100644 --- a/source/plugins/ruby/in_kubestate_hpa.rb +++ b/source/plugins/ruby/in_kubestate_hpa.rb @@ -2,231 +2,236 @@ # frozen_string_literal: true module Fluent - class Kube_Kubestate_HPA_Input < Input - Plugin.register_input("kubestatehpa", self) - @@istestvar = ENV["ISTEST"] - - - def initialize - super - require "yajl/json_gem" - require "yajl" - require "time" - - require_relative "KubernetesApiClient" - require_relative "oms_common" - require_relative "omslog" - require_relative "ApplicationInsightsUtility" - require_relative "constants" - - # roughly each HPA is 3k - # 2000 HPAs account to approximately 6-7MB - @HPA_CHUNK_SIZE = 2000 - @HPA_API_GROUP = "autoscaling" - - # telemetry - @hpaCount = 0 - - @NodeName = OMS::Common.get_hostname - @ClusterId = KubernetesApiClient.getClusterId - @ClusterName = KubernetesApiClient.getClusterName - end - - config_param :run_interval, :time, :default => 60 - config_param :tag, :string, :default => Constants::INSIGHTSMETRICS_FLUENT_TAG - - def configure(conf) - super - end - - def start - if @run_interval - @finished = false - @condition = ConditionVariable.new - @mutex = Mutex.new - @thread = Thread.new(&method(:run_periodic)) + class Kube_Kubestate_HPA_Input < Input + Plugin.register_input("kubestatehpa", self) + @@istestvar = ENV["ISTEST"] + + def initialize + super + require "yajl/json_gem" + require "yajl" + require "time" + + require_relative "KubernetesApiClient" + require_relative "oms_common" + require_relative "omslog" + require_relative "ApplicationInsightsUtility" + 
require_relative "constants" + + # refer tomlparser-agent-config for defaults + # this configurable via configmap + @HPA_CHUNK_SIZE = 0 + + @HPA_API_GROUP = "autoscaling" + + # telemetry + @hpaCount = 0 + + @NodeName = OMS::Common.get_hostname + @ClusterId = KubernetesApiClient.getClusterId + @ClusterName = KubernetesApiClient.getClusterName + end + + config_param :run_interval, :time, :default => 60 + config_param :tag, :string, :default => Constants::INSIGHTSMETRICS_FLUENT_TAG + + def configure(conf) + super + end + + def start + if @run_interval + if !ENV["HPA_CHUNK_SIZE"].nil? && !ENV["HPA_CHUNK_SIZE"].empty? && ENV["HPA_CHUNK_SIZE"].to_i > 0 + @HPA_CHUNK_SIZE = ENV["HPA_CHUNK_SIZE"].to_i + else + # this shouldnt happen just setting default here as safe guard + $log.warn("in_kubestate_hpa::start: setting to default value since got HPA_CHUNK_SIZE nil or empty") + @HPA_CHUNK_SIZE = 2000 end + $log.info("in_kubestate_hpa::start : HPA_CHUNK_SIZE @ #{@HPA_CHUNK_SIZE}") + + @finished = false + @condition = ConditionVariable.new + @mutex = Mutex.new + @thread = Thread.new(&method(:run_periodic)) end - - def shutdown - if @run_interval - @mutex.synchronize { - @finished = true - @condition.signal - } - @thread.join - end + end + + def shutdown + if @run_interval + @mutex.synchronize { + @finished = true + @condition.signal + } + @thread.join end - - def enumerate - begin - hpaList = nil - currentTime = Time.now - batchTime = currentTime.utc.iso8601 - - @hpaCount = 0 - - # Initializing continuation token to nil - continuationToken = nil - $log.info("in_kubestate_hpa::enumerate : Getting HPAs from Kube API @ #{Time.now.utc.iso8601}") - continuationToken, hpaList = KubernetesApiClient.getResourcesAndContinuationToken("horizontalpodautoscalers?limit=#{@HPA_CHUNK_SIZE}", api_group: @HPA_API_GROUP) - $log.info("in_kubestate_hpa::enumerate : Done getting HPAs from Kube API @ #{Time.now.utc.iso8601}") + end + + def enumerate + begin + hpaList = nil + currentTime = Time.now + 
batchTime = currentTime.utc.iso8601 + + @hpaCount = 0 + + # Initializing continuation token to nil + continuationToken = nil + $log.info("in_kubestate_hpa::enumerate : Getting HPAs from Kube API @ #{Time.now.utc.iso8601}") + continuationToken, hpaList = KubernetesApiClient.getResourcesAndContinuationToken("horizontalpodautoscalers?limit=#{@HPA_CHUNK_SIZE}", api_group: @HPA_API_GROUP) + $log.info("in_kubestate_hpa::enumerate : Done getting HPAs from Kube API @ #{Time.now.utc.iso8601}") + if (!hpaList.nil? && !hpaList.empty? && hpaList.key?("items") && !hpaList["items"].nil? && !hpaList["items"].empty?) + parse_and_emit_records(hpaList, batchTime) + else + $log.warn "in_kubestate_hpa::enumerate:Received empty hpaList" + end + + #If we receive a continuation token, make calls, process and flush data until we have processed all data + while (!continuationToken.nil? && !continuationToken.empty?) + continuationToken, hpaList = KubernetesApiClient.getResourcesAndContinuationToken("horizontalpodautoscalers?limit=#{@HPA_CHUNK_SIZE}&continue=#{continuationToken}", api_group: @HPA_API_GROUP) if (!hpaList.nil? && !hpaList.empty? && hpaList.key?("items") && !hpaList["items"].nil? && !hpaList["items"].empty?) parse_and_emit_records(hpaList, batchTime) else $log.warn "in_kubestate_hpa::enumerate:Received empty hpaList" end - - #If we receive a continuation token, make calls, process and flush data until we have processed all data - while (!continuationToken.nil? && !continuationToken.empty?) - continuationToken, hpaList = KubernetesApiClient.getResourcesAndContinuationToken("horizontalpodautoscalers?limit=#{@HPA_CHUNK_SIZE}&continue=#{continuationToken}", api_group: @HPA_API_GROUP) - if (!hpaList.nil? && !hpaList.empty? && hpaList.key?("items") && !hpaList["items"].nil? && !hpaList["items"].empty?) 
- parse_and_emit_records(hpaList, batchTime) - else - $log.warn "in_kubestate_hpa::enumerate:Received empty hpaList" + end + + # Setting this to nil so that we dont hold memory until GC kicks in + hpaList = nil + + # Flush AppInsights telemetry once all the processing is done, only if the number of events flushed is greater than 0 + if (@hpaCount > 0) + # this will not be a useful telemetry, as hpa counts will not be huge, just log for now + $log.info("in_kubestate_hpa::hpaCount= #{hpaCount}") + #ApplicationInsightsUtility.sendMetricTelemetry("HPACount", @hpaCount, {}) + end + rescue => errorStr + $log.warn "in_kubestate_hpa::enumerate:Failed in enumerate: #{errorStr}" + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::enumerate:Failed in enumerate: #{errorStr}") + end + end # end enumerate + + def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) + metricItems = [] + insightsMetricsEventStream = MultiEventStream.new + begin + metricInfo = hpas + metricInfo["items"].each do |hpa| + hpaName = hpa["metadata"]["name"] + hpaNameSpace = hpa["metadata"]["namespace"] + hpaCreatedTime = "" + if !hpa["metadata"]["creationTimestamp"].nil? + hpaCreatedTime = hpa["metadata"]["creationTimestamp"] + end + hpaSpecMinReplicas = 1 #default is 1 as per k8s spec + if !hpa["spec"]["minReplicas"].nil? + hpaSpecMinReplicas = hpa["spec"]["minReplicas"] + end + hpaSpecMaxReplicas = 0 + if !hpa["spec"]["maxReplicas"].nil? + hpaSpecMaxReplicas = hpa["spec"]["maxReplicas"] + end + hpaSpecScaleTargetKind = "" + hpaSpecScaleTargetName = "" + if !hpa["spec"]["scaleTargetRef"].nil? + if !hpa["spec"]["scaleTargetRef"]["kind"].nil? + hpaSpecScaleTargetKind = hpa["spec"]["scaleTargetRef"]["kind"] + end + if !hpa["spec"]["scaleTargetRef"]["name"].nil? 
+ hpaSpecScaleTargetName = hpa["spec"]["scaleTargetRef"]["name"] end end - - # Setting this to nil so that we dont hold memory until GC kicks in - hpaList = nil - - # Flush AppInsights telemetry once all the processing is done, only if the number of events flushed is greater than 0 - if (@hpaCount > 0) - # this will not be a useful telemetry, as hpa counts will not be huge, just log for now - $log.info("in_kubestate_hpa::hpaCount= #{hpaCount}") - #ApplicationInsightsUtility.sendMetricTelemetry("HPACount", @hpaCount, {}) + hpaStatusCurrentReplicas = 0 + if !hpa["status"]["currentReplicas"].nil? + hpaStatusCurrentReplicas = hpa["status"]["currentReplicas"] end - rescue => errorStr - $log.warn "in_kubestate_hpa::enumerate:Failed in enumerate: #{errorStr}" - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::enumerate:Failed in enumerate: #{errorStr}") + hpaStatusDesiredReplicas = 0 + if !hpa["status"]["desiredReplicas"].nil? + hpaStatusDesiredReplicas = hpa["status"]["desiredReplicas"] + end + + hpaStatuslastScaleTime = "" + if !hpa["status"]["lastScaleTime"].nil? 
+ hpaStatuslastScaleTime = hpa["status"]["lastScaleTime"] + end + + metricItem = {} + metricItem["CollectionTime"] = batchTime + metricItem["Computer"] = @NodeName + metricItem["Name"] = Constants::INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_HPA_STATE + metricItem["Value"] = hpaStatusCurrentReplicas + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE + + metricTags = {} + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = @ClusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = @ClusterName + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_NAME] = hpaName + metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = hpaNameSpace + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME] = hpaCreatedTime + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MIN_REPLICAS] = hpaSpecMinReplicas + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MAX_REPLICAS] = hpaSpecMaxReplicas + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_KIND] = hpaSpecScaleTargetKind + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_NAME] = hpaSpecScaleTargetName + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_DESIRED_REPLICAS] = hpaStatusDesiredReplicas + metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_LAST_SCALE_TIME] = hpaStatuslastScaleTime + + metricItem["Tags"] = metricTags + + metricItems.push(metricItem) end - end # end enumerate - - def parse_and_emit_records(hpas, batchTime = Time.utc.iso8601) - metricItems = [] - insightsMetricsEventStream = MultiEventStream.new - begin - metricInfo = hpas - metricInfo["items"].each do |hpa| - hpaName = hpa["metadata"]["name"] - hpaNameSpace = hpa["metadata"]["namespace"] - hpaCreatedTime = "" - if !hpa["metadata"]["creationTimestamp"].nil? 
- hpaCreatedTime = hpa["metadata"]["creationTimestamp"] - end - hpaSpecMinReplicas = 1 #default is 1 as per k8s spec - if !hpa["spec"]["minReplicas"].nil? - hpaSpecMinReplicas = hpa["spec"]["minReplicas"] - end - hpaSpecMaxReplicas = 0 - if !hpa["spec"]["maxReplicas"].nil? - hpaSpecMaxReplicas = hpa["spec"]["maxReplicas"] - end - hpaSpecScaleTargetKind = "" - hpaSpecScaleTargetName = "" - if !hpa["spec"]["scaleTargetRef"].nil? - if !hpa["spec"]["scaleTargetRef"]["kind"].nil? - hpaSpecScaleTargetKind = hpa["spec"]["scaleTargetRef"]["kind"] - end - if !hpa["spec"]["scaleTargetRef"]["name"].nil? - hpaSpecScaleTargetName = hpa["spec"]["scaleTargetRef"]["name"] - end - - end - hpaStatusCurrentReplicas = 0 - if !hpa["status"]["currentReplicas"].nil? - hpaStatusCurrentReplicas = hpa["status"]["currentReplicas"] - end - hpaStatusDesiredReplicas = 0 - if !hpa["status"]["desiredReplicas"].nil? - hpaStatusDesiredReplicas = hpa["status"]["desiredReplicas"] - end - - hpaStatuslastScaleTime = "" - if !hpa["status"]["lastScaleTime"].nil? 
- hpaStatuslastScaleTime = hpa["status"]["lastScaleTime"] - end - - - metricItem = {} - metricItem["CollectionTime"] = batchTime - metricItem["Computer"] = @NodeName - metricItem["Name"] = Constants::INSIGHTSMETRICS_METRIC_NAME_KUBE_STATE_HPA_STATE - metricItem["Value"] = hpaStatusCurrentReplicas - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN - metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_KUBESTATE_NAMESPACE - - metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = @ClusterId - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = @ClusterName - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_NAME] = hpaName - metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = hpaNameSpace - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_CREATIONTIME] = hpaCreatedTime - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MIN_REPLICAS] = hpaSpecMinReplicas - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_MAX_REPLICAS] = hpaSpecMaxReplicas - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_KIND] = hpaSpecScaleTargetKind - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_SPEC_SCALE_TARGET_NAME] = hpaSpecScaleTargetName - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_DESIRED_REPLICAS] = hpaStatusDesiredReplicas - metricTags[Constants::INSIGHTSMETRICS_TAGS_KUBE_STATE_HPA_STATUS_LAST_SCALE_TIME] = hpaStatuslastScaleTime - - - metricItem["Tags"] = metricTags - - metricItems.push(metricItem) - end - time = Time.now.to_f - metricItems.each do |insightsMetricsRecord| - wrapper = { - "DataType" => "INSIGHTS_METRICS_BLOB", - "IPName" => "ContainerInsights", - "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], - } - insightsMetricsEventStream.add(time, wrapper) if wrapper - end - - router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream - 
$log.info("successfully emitted #{metricItems.length()} kube_state_hpa metrics") - if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) - $log.info("kubestatehpaInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") - end - rescue => error - $log.warn("in_kubestate_hpa::parse_and_emit_records failed: #{error} ") - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::parse_and_emit_records failed: #{error}") + time = Time.now.to_f + metricItems.each do |insightsMetricsRecord| + wrapper = { + "DataType" => "INSIGHTS_METRICS_BLOB", + "IPName" => "ContainerInsights", + "DataItems" => [insightsMetricsRecord.each { |k, v| insightsMetricsRecord[k] = v }], + } + insightsMetricsEventStream.add(time, wrapper) if wrapper + end + + router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream + $log.info("successfully emitted #{metricItems.length()} kube_state_hpa metrics") + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0 && insightsMetricsEventStream.count > 0) + $log.info("kubestatehpaInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") end - + rescue => error + $log.warn("in_kubestate_hpa::parse_and_emit_records failed: #{error} ") + ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::parse_and_emit_records failed: #{error}") end - - def run_periodic - @mutex.lock + end + + def run_periodic + @mutex.lock + done = @finished + @nextTimeToRun = Time.now + @waitTimeout = @run_interval + until done + @nextTimeToRun = @nextTimeToRun + @run_interval + @now = Time.now + if @nextTimeToRun <= @now + @waitTimeout = 1 + @nextTimeToRun = @now + else + @waitTimeout = @nextTimeToRun - @now + end + @condition.wait(@mutex, @waitTimeout) done = @finished - @nextTimeToRun = Time.now - @waitTimeout = @run_interval - until done - @nextTimeToRun = @nextTimeToRun + @run_interval - @now = Time.now - if @nextTimeToRun <= @now - @waitTimeout = 1 - @nextTimeToRun = @now - else - @waitTimeout = @nextTimeToRun - @now - end - @condition.wait(@mutex, @waitTimeout) - done = @finished - @mutex.unlock - if !done - begin - $log.info("in_kubestate_hpa::run_periodic.enumerate.start @ #{Time.now.utc.iso8601}") - enumerate - $log.info("in_kubestate_hpa::run_periodic.enumerate.end @ #{Time.now.utc.iso8601}") - rescue => errorStr - $log.warn "in_kubestate_hpa::run_periodic: enumerate Failed to retrieve kube hpas: #{errorStr}" - ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::run_periodic: enumerate Failed to retrieve kube hpas: #{errorStr}") - end + @mutex.unlock + if !done + begin + $log.info("in_kubestate_hpa::run_periodic.enumerate.start @ #{Time.now.utc.iso8601}") + enumerate + $log.info("in_kubestate_hpa::run_periodic.enumerate.end @ #{Time.now.utc.iso8601}") + rescue => errorStr + $log.warn "in_kubestate_hpa::run_periodic: enumerate Failed to retrieve kube hpas: #{errorStr}" + 
ApplicationInsightsUtility.sendExceptionTelemetry("in_kubestate_hpa::run_periodic: enumerate Failed to retrieve kube hpas: #{errorStr}") end - @mutex.lock end - @mutex.unlock + @mutex.lock end + @mutex.unlock end -end \ No newline at end of file + end +end diff --git a/source/plugins/ruby/kubernetes_container_inventory.rb b/source/plugins/ruby/kubernetes_container_inventory.rb index 4fe728579..69beca493 100644 --- a/source/plugins/ruby/kubernetes_container_inventory.rb +++ b/source/plugins/ruby/kubernetes_container_inventory.rb @@ -189,34 +189,47 @@ def getContainersInfoMap(podItem, isWindows) return containersInfoMap end - def obtainContainerEnvironmentVars(containerId) - $log.info("KubernetesContainerInventory::obtainContainerEnvironmentVars @ #{Time.now.utc.iso8601}") + def obtainContainerEnvironmentVars(containerId) envValueString = "" begin - unless @@containerCGroupCache.has_key?(containerId) - $log.info("KubernetesContainerInventory::obtainContainerEnvironmentVars fetching cGroup parent pid @ #{Time.now.utc.iso8601} for containerId: #{containerId}") + isCGroupPidFetchRequired = false + if !@@containerCGroupCache.has_key?(containerId) + isCGroupPidFetchRequired = true + else + cGroupPid = @@containerCGroupCache[containerId] + if cGroupPid.nil? || cGroupPid.empty? + isCGroupPidFetchRequired = true + @@containerCGroupCache.delete(containerId) + elsif !File.exist?("/hostfs/proc/#{cGroupPid}/environ") + isCGroupPidFetchRequired = true + @@containerCGroupCache.delete(containerId) + end + end + + if isCGroupPidFetchRequired Dir["/hostfs/proc/*/cgroup"].each do |filename| begin - if File.file?(filename) && File.foreach(filename).grep(/#{containerId}/).any? + if File.file?(filename) && File.exist?(filename) && File.foreach(filename).grep(/#{containerId}/).any? 
# file full path is /hostfs/proc//cgroup - cGroupPid = filename.split("/")[3] - if @@containerCGroupCache.has_key?(containerId) - tempCGroupPid = @@containerCGroupCache[containerId] - if tempCGroupPid > cGroupPid + cGroupPid = filename.split("/")[3] + if is_number?(cGroupPid) + if @@containerCGroupCache.has_key?(containerId) + tempCGroupPid = @@containerCGroupCache[containerId] + if tempCGroupPid.to_i > cGroupPid.to_i + @@containerCGroupCache[containerId] = cGroupPid + end + else @@containerCGroupCache[containerId] = cGroupPid - end - else - @@containerCGroupCache[containerId] = cGroupPid + end end end - rescue SystemCallError # ignore Error::ENOENT,Errno::ESRCH which is expected if any of the container gone while we read - end - end + rescue SystemCallError # ignore Error::ENOENT,Errno::ESRCH which is expected if any of the container gone while we read + end + end end cGroupPid = @@containerCGroupCache[containerId] if !cGroupPid.nil? && !cGroupPid.empty? - environFilePath = "/hostfs/proc/#{cGroupPid}/environ" - $log.info("KubernetesContainerInventory::obtainContainerEnvironmentVars cGroupPid: #{cGroupPid} environFilePath: #{environFilePath} for containerId: #{containerId}") + environFilePath = "/hostfs/proc/#{cGroupPid}/environ" if File.exist?(environFilePath) # Skip environment variable processing if it contains the flag AZMON_COLLECT_ENV=FALSE # Check to see if the environment variable collection is disabled for this container. @@ -229,8 +242,7 @@ def obtainContainerEnvironmentVars(containerId) if !envVars.nil? && !envVars.empty? envVars = envVars.split("\0") envValueString = envVars.to_json - envValueStringLength = envValueString.length - $log.info("KubernetesContainerInventory::environment vars filename @ #{environFilePath} envVars size @ #{envValueStringLength}") + envValueStringLength = envValueString.length if envValueStringLength >= 200000 lastIndex = envValueString.rindex("\",") if !lastIndex.nil? 
@@ -341,5 +353,8 @@ def deleteCGroupCacheEntryForDeletedContainer(containerId) ApplicationInsightsUtility.sendExceptionTelemetry(error) end end + def is_number?(value) + true if Integer(value) rescue false + end end end diff --git a/source/plugins/ruby/out_mdm.rb b/source/plugins/ruby/out_mdm.rb index 1c805255a..6238eb51a 100644 --- a/source/plugins/ruby/out_mdm.rb +++ b/source/plugins/ruby/out_mdm.rb @@ -50,6 +50,10 @@ def initialize @cluster_identity = nil @isArcK8sCluster = false @get_access_token_backoff_expiry = Time.now + + @mdm_exceptions_hash = {} + @mdm_exceptions_count = 0 + @mdm_exception_telemetry_time_tracker = DateTime.now.to_time.to_i end def configure(conf) @@ -221,10 +225,49 @@ def format(tag, time, record) end end + def exception_aggregator(error) + begin + errorStr = error.to_s + if (@mdm_exceptions_hash[errorStr].nil?) + @mdm_exceptions_hash[errorStr] = 1 + else + @mdm_exceptions_hash[errorStr] += 1 + end + #Keeping track of all exceptions to send the total in the last flush interval as a metric + @mdm_exceptions_count += 1 + rescue => error + @log.info "Error in MDM exception_aggregator method: #{error}" + ApplicationInsightsUtility.sendExceptionTelemetry(error) + end + end + + def flush_mdm_exception_telemetry + begin + #Flush out exception telemetry as a metric for the last 30 minutes + timeDifference = (DateTime.now.to_time.to_i - @mdm_exception_telemetry_time_tracker).abs + timeDifferenceInMinutes = timeDifference / 60 + if (timeDifferenceInMinutes >= Constants::MDM_EXCEPTIONS_METRIC_FLUSH_INTERVAL) + telemetryProperties = {} + telemetryProperties["ExceptionsHashForFlushInterval"] = @mdm_exceptions_hash.to_json + telemetryProperties["FlushInterval"] = Constants::MDM_EXCEPTIONS_METRIC_FLUSH_INTERVAL + ApplicationInsightsUtility.sendMetricTelemetry(Constants::MDM_EXCEPTION_TELEMETRY_METRIC, @mdm_exceptions_count, telemetryProperties) + # Resetting values after flushing + @mdm_exceptions_count = 0 + @mdm_exceptions_hash = {} + 
@mdm_exception_telemetry_time_tracker = DateTime.now.to_time.to_i + end + rescue => error + @log.info "Error in flush_mdm_exception_telemetry method: #{error}" + ApplicationInsightsUtility.sendExceptionTelemetry(error) + end + end + # This method is called every flush interval. Send the buffer chunk to MDM. # 'chunk' is a buffer chunk that includes multiple formatted records def write(chunk) begin + # Adding this before trying to flush out metrics, since adding after can lead to metrics never being sent + flush_mdm_exception_telemetry if (!@first_post_attempt_made || (Time.now > @last_post_attempt_time + retry_mdm_post_wait_minutes * 60)) && @can_send_data_to_mdm post_body = [] chunk.msgpack_each { |(tag, record)| @@ -247,7 +290,8 @@ def write(chunk) end end rescue Exception => e - ApplicationInsightsUtility.sendExceptionTelemetry(e) + # Adding exceptions to hash to aggregate and send telemetry for all write errors + exception_aggregator(e) @log.info "Exception when writing to MDM: #{e}" raise e end @@ -282,7 +326,6 @@ def send_to_mdm(post_body) else @log.info "Failed to Post Metrics to MDM : #{e} Response: #{response}" end - #@log.info "MDM request : #{post_body}" @log.debug_backtrace(e.backtrace) if !response.code.empty? 
&& response.code == 403.to_s @log.info "Response Code #{response.code} Updating @last_post_attempt_time" @@ -297,15 +340,15 @@ def send_to_mdm(post_body) @log.info "HTTPServerException when POSTing Metrics to MDM #{e} Response: #{response}" raise e end + # Adding exceptions to hash to aggregate and send telemetry for all 400 error codes + exception_aggregator(e) rescue Errno::ETIMEDOUT => e @log.info "Timed out when POSTing Metrics to MDM : #{e} Response: #{response}" @log.debug_backtrace(e.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(e) raise e rescue Exception => e @log.info "Exception POSTing Metrics to MDM : #{e} Response: #{response}" @log.debug_backtrace(e.backtrace) - ApplicationInsightsUtility.sendExceptionTelemetry(e) raise e end end diff --git a/source/plugins/ruby/podinventory_to_mdm.rb b/source/plugins/ruby/podinventory_to_mdm.rb index 834515969..77370e284 100644 --- a/source/plugins/ruby/podinventory_to_mdm.rb +++ b/source/plugins/ruby/podinventory_to_mdm.rb @@ -80,14 +80,14 @@ class Inventory2MdmConvertor @@pod_phase_values = ["Running", "Pending", "Succeeded", "Failed", "Unknown"] @process_incoming_stream = false - def initialize(custom_metrics_azure_regions) + def initialize() @log_path = "/var/opt/microsoft/docker-cimprov/log/mdm_metrics_generator.log" @log = Logger.new(@log_path, 1, 5000000) @pod_count_hash = {} @no_phase_dim_values_hash = {} @pod_count_by_phase = {} @pod_uids = {} - @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability(custom_metrics_azure_regions) + @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" @log.debug { "Starting podinventory_to_mdm plugin" } end From dd7afba366c20b953f435bef384aeb2524a85ded Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 23 Feb 2021 16:53:28 -0800 Subject: [PATCH 09/18] Merge ci_dev into ci_prod for 02232021 release (#507) 
* separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to install docker * fix: Pin to a particular version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list 
since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics (#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod branch * fix links * fix image link * some more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments * Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for 
PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. * add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback * Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. 
* Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link * chart update for sept2020 release (#457) * add missing version update in the script (#458) * November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart * remove hiphen for params in chart (#462) Merging as its a simple fix (remove hiphen) * Changes for cutting a new build for ciprod10272020 release (#460) * using latest stable version of msys2 (#465) * fixing the windows-perf-dups (#466) * chart updates related to new microsoft/charts repo (#467) * Changes for creating 11092020 release (#468) * MDM exception aggregation (#470) * grwehner/mdm custom metric regions (#471) Remove custom metrics region check for public cloud * updaitng rs limit to 1gb (#474) * grwehner/pv inventory (#455) Add fluentd plugin to request persistent volume info from the kubernetes api and send to LA * Gangams/fix for build release pipeline issue (#476) * use isolated cdpx acr * correct comment * add pv fluentd plugin config to helm rs config (#477) * add pv fluentd plugin to helm rs config * helm rbac permissions for pv api calls * Gangams/fix rs ooming (#473) * optimize kpi * optimize kube node inventory * add flags for events, deployments and hpa * have separate function parseNodeLimits * refactor code * fix crash * fix bug with service name * fix bugs related to get service name * update oom fix test agent * debug logs * fix service label issue * update to latest agent and enable ephemeral annotation * change stream size to 200 from 250 * update yaml * adjust chunksizes * add ruby gc env * yaml changes for cioomtest11282020-3 * telemetry to track pods latency * service count telemetry * rename variables * wip * nodes inventory telemetry * configmap changes * add emit streams in configmap * yaml updates * fix copy and paste bug * add 
todo comments * fix node latency telemetry bug * update yaml with latest test image * fix bug * upping rs memory change * fix mdm bug with final emit stream * update to latest image * fix pr feedback * fix pr feedback * rename health config to agent config * fix max allowed hpa chunk size * update to use 1k pod chunk since validated on 1.18+ * remove debug logs * minor updates * move defaults to common place * chart updates * final oomfix agent * update to use prod image so that can be validated with build pipeline * fix typo in comment * Gangams/enable arc onboarding to ff (#478) * wip * updates * trigger login if the ctx cloud not same as specified cloud * add missed commit * Convert PV type dictionary to json for telemetry so it shows up in logs (#480) * fix 2 windows tasks - 1) Dont log to termination log 2) enable ADX route for containerlogs in windows (for O365) (#482) * fix ci envvar collection in large pods (#483) * grwehner/jan agent tasks (#481) - Windows agent fix to use log filtering settings in config map. - Error handling for kubelet_utils get_node_capacity in case /metrics/cadvsior endpoint fails. 
- Remove env variable for workspace key for windows agent * updating fbit version and cpu limit (#485) * reverting to older version (#487) * Gangams/add fbsettings configurable via configmap (#486) * wip * fbit config settings * add config warn message * handle one config provided but not other * fixed pr feedback * fix copy paste error * rename config parameter names * fix typo * fix fbit crash in helm path * fix nil check * Gangams/jan agent release tasks (#484) * wip * explicit amd64 affinity for hybrid workloads * fix space issue * wip * revert vscode setting file * remove per container logs in ci (#488) * updates for ciprod01112021 release (#489) * new yaml files (#491) * Use cloud-specific instrumentation keys (#494) If APPLICATIONINSIGHTS_AUTH_URL is set/non-empty then the agent will now grab a custom IKey from a URL stored in APPLICATIONINSIGHTS_AUTH_URL * upgrade apt to latest version (#492) * upgrade apt to latest version * fix pr feedback * Gangams/add support for extension msi for arc k8s cluster (#495) * wip * add env var for the arc k8s extension name * chart update * extension msi updates * fix bug * revert chart and image to prod version * minor text changes * image tag to prod * wip * wip * wip * wip * final updates * fix whitespaces * simplify crd yaml * Gangams/arm template arc k8s extension (#496) * arm templates for arc k8s extension * update to use official extension type name * update * add identity property * add proxyendpointurl parameter * add default values * Gangams/aks monitoring via policy (#497) * enable monitoring through policy * wip * handle tags * wip * add alias * wip * working * updates * working * with deployment name * doc updates * doc updates * fix typo in the docs * revert to use operatingSystem from osImage for node os telemety (#498) * Container log v2 schema changes (#499) * make pod name in mdsd definition as str for consistency. msgp has no type checking, as it has type metadata in it the message itself. 
* Add priority class to the daemonsets (#500) * Add priority class to the daemonsets Add a priority class for omsagent and have the daemonsets use this to be sure to schedule the pods. Daemonset pods are constrained in scheduling to run on specific nodes. This is done by the daemonset controller. When a node shows up it will create a pod with a strong affinity to that node. When a node goes away, it will delete the pod with the node affinity to that node. Kubernetes pod scheduling does not know it is a daemonset but it does know it is tied to a specific node. With default scheduling, it is possible for the pods to be "frozen out" of a node because the node already is full. This can happen because "normal" pods may already exist and are looking for a node to get scheduled on when a node is added to the cluster. The daemonset controller will only first create the pod for the node at around the same time. The kubernetes scheduler is running async from all of this and thus there can be a race as to who gets scheduled on the node. The pod priority class (and thus the pod priority) is a way to indicate that the pod has a higher scheduling priority than a default pod. By default, all pods are at priority 0. Higher numbers are higher priority. Setting the priority to something greater than zero will allow the omsagent daemonsets to win a race against "normal" pods for scheduled resources on a node - and will also allow for graceful eviction in the case the node is too full. Without this, omsagent can be left out of node in clusters that are very busy, especially in dynamic scaling situations. I did not test the windows pod as we have no windows clusters. 
* CR feedback * fix node metric issue (#502) * Bug fixes for Feb release (#504) * bug fix for mdm metrics with no limits * fix exception bug * Gangams/feb 2021 agent bug fix (#505) * fix npe in getKubeServiceRecords * use image fields from spec * fix typo * cover all cases * handle scenario only digest specified * changes for release -ciprod02232021 (#506) * Gangams/e2e test framework (#503) * add agent e2e fw and tests * doc and script updates * add validation script * doc updates * yaml updates * fix typo * doc updates * more doc updates * add ISTEST for helm chart to use arc conf * refactor test code * fix pr feedback * fix pr feedback * fix pr feedback * fix pr feedback Co-authored-by: Ganga Mahesh Siddem Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner Co-authored-by: deagraw Co-authored-by: David Michelman Co-authored-by: Michael Sinz <36865706+Michael-Sinz@users.noreply.github.com> --- .../update-place-holdres-in-e2e-tests.sh | 35 ++ .pipelines/validate-e2e-tests-results.sh | 71 +++ README.md | 31 ++ ReleaseNotes.md | 78 +--- build/common/installer/scripts/tomlparser.rb | 14 + build/version | 4 +- charts/azuremonitor-containers/Chart.yaml | 2 +- charts/azuremonitor-containers/README.md | 2 + .../templates/omsagent-arc-k8s-crd.yaml | 17 + .../templates/omsagent-daemonset-windows.yaml | 5 +- .../templates/omsagent-daemonset.yaml | 11 +- .../templates/omsagent-deployment.yaml | 8 +- .../templates/omsagent-priorityclass.yaml | 22 + .../templates/omsagent-rbac.yaml | 4 + charts/azuremonitor-containers/values.yaml | 56 ++- kubernetes/linux/Dockerfile | 2 +- .../build-and-publish-docker-image.sh | 0 kubernetes/linux/main.sh | 48 +- kubernetes/linux/mdsd.xml | 67 ++- kubernetes/linux/setup.sh | 4 +- kubernetes/omsagent.yaml | 18 +- kubernetes/windows/Dockerfile | 2 +- kubernetes/windows/main.ps1 | 50 +- .../azure-policy.json | 113 +++++ 
.../azurepolicy.parameters.json | 9 + .../azurepolicy.rules.json | 101 +++++ .../cluster-user-role-binding.yaml | 12 + .../clusteruser/cluster-user-role.yaml | 14 + .../enable-monitoring-using-policy.md | 64 +++ .../onboarding/managed/enable-monitoring.ps1 | 4 +- .../onboarding/managed/enable-monitoring.sh | 2 +- .../onboarding/managed/upgrade-monitoring.sh | 2 +- .../existingClusterOnboarding.json | 135 ++++++ .../existingClusterParam.json | 24 + source/plugins/go/src/oms.go | 288 ++++++++---- source/plugins/go/src/utils.go | 2 +- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 132 +++--- source/plugins/ruby/KubernetesApiClient.rb | 2 +- .../plugins/ruby/arc_k8s_cluster_identity.rb | 11 +- source/plugins/ruby/in_kube_events.rb | 4 + source/plugins/ruby/in_kube_nodes.rb | 23 +- source/plugins/ruby/in_kube_podinventory.rb | 12 + source/plugins/ruby/in_kube_pvinventory.rb | 5 +- source/plugins/ruby/kubelet_utils.rb | 10 +- .../ruby/kubernetes_container_inventory.rb | 71 ++- test/e2e/e2e-tests.yaml | 178 ++++++++ test/e2e/src/common/arm_rest_utility.py | 25 + test/e2e/src/common/constants.py | 119 +++++ test/e2e/src/common/helm_utility.py | 68 +++ .../common/kubernetes_configmap_utility.py | 8 + test/e2e/src/common/kubernetes_crd_utility.py | 27 ++ .../common/kubernetes_daemonset_utility.py | 36 ++ .../common/kubernetes_deployment_utility.py | 38 ++ .../common/kubernetes_namespace_utility.py | 32 ++ .../e2e/src/common/kubernetes_node_utility.py | 12 + test/e2e/src/common/kubernetes_pod_utility.py | 65 +++ .../src/common/kubernetes_secret_utility.py | 26 ++ .../src/common/kubernetes_service_utility.py | 19 + .../src/common/kubernetes_version_utility.py | 9 + test/e2e/src/common/results_utility.py | 24 + test/e2e/src/core/Dockerfile | 17 + test/e2e/src/core/conftest.py | 90 ++++ test/e2e/src/core/e2e_tests.sh | 26 ++ test/e2e/src/core/helper.py | 429 ++++++++++++++++++ test/e2e/src/core/pytest.ini | 4 + test/e2e/src/tests/test_ds_workflows.py | 60 +++ 
test/e2e/src/tests/test_e2e_workflows.py | 330 ++++++++++++++ .../tests/test_node_metrics_e2e_workflow.py | 420 +++++++++++++++++ .../tests/test_pod_metrics_e2e_workflow.py | 134 ++++++ test/e2e/src/tests/test_resource_status.py | 43 ++ test/e2e/src/tests/test_rs_workflows.py | 93 ++++ 71 files changed, 3596 insertions(+), 327 deletions(-) create mode 100755 .pipelines/update-place-holdres-in-e2e-tests.sh create mode 100644 .pipelines/validate-e2e-tests-results.sh create mode 100644 charts/azuremonitor-containers/templates/omsagent-priorityclass.yaml mode change 100644 => 100755 kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh create mode 100644 scripts/onboarding/aks/onboarding-using-azure-policy/azure-policy.json create mode 100644 scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.parameters.json create mode 100644 scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.rules.json create mode 100644 scripts/onboarding/clusteruser/cluster-user-role-binding.yaml create mode 100644 scripts/onboarding/clusteruser/cluster-user-role.yaml create mode 100644 scripts/onboarding/enable-monitoring-using-policy.md create mode 100644 scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json create mode 100644 scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json create mode 100644 test/e2e/e2e-tests.yaml create mode 100644 test/e2e/src/common/arm_rest_utility.py create mode 100644 test/e2e/src/common/constants.py create mode 100644 test/e2e/src/common/helm_utility.py create mode 100644 test/e2e/src/common/kubernetes_configmap_utility.py create mode 100644 test/e2e/src/common/kubernetes_crd_utility.py create mode 100644 test/e2e/src/common/kubernetes_daemonset_utility.py create mode 100644 test/e2e/src/common/kubernetes_deployment_utility.py create mode 100644 test/e2e/src/common/kubernetes_namespace_utility.py create mode 100644 test/e2e/src/common/kubernetes_node_utility.py create mode 100644 
test/e2e/src/common/kubernetes_pod_utility.py create mode 100644 test/e2e/src/common/kubernetes_secret_utility.py create mode 100644 test/e2e/src/common/kubernetes_service_utility.py create mode 100644 test/e2e/src/common/kubernetes_version_utility.py create mode 100644 test/e2e/src/common/results_utility.py create mode 100644 test/e2e/src/core/Dockerfile create mode 100644 test/e2e/src/core/conftest.py create mode 100644 test/e2e/src/core/e2e_tests.sh create mode 100755 test/e2e/src/core/helper.py create mode 100644 test/e2e/src/core/pytest.ini create mode 100755 test/e2e/src/tests/test_ds_workflows.py create mode 100755 test/e2e/src/tests/test_e2e_workflows.py create mode 100755 test/e2e/src/tests/test_node_metrics_e2e_workflow.py create mode 100755 test/e2e/src/tests/test_pod_metrics_e2e_workflow.py create mode 100755 test/e2e/src/tests/test_resource_status.py create mode 100755 test/e2e/src/tests/test_rs_workflows.py diff --git a/.pipelines/update-place-holdres-in-e2e-tests.sh b/.pipelines/update-place-holdres-in-e2e-tests.sh new file mode 100755 index 000000000..5fec73684 --- /dev/null +++ b/.pipelines/update-place-holdres-in-e2e-tests.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +echo "start: update placeholders of e2e-tests.yaml ..." 
+ +for ARGUMENT in "$@" +do + KEY=$(echo $ARGUMENT | cut -f1 -d=) + VALUE=$(echo $ARGUMENT | cut -f2 -d=) + + case "$KEY" in + TENANT_ID) TENANT_ID=$VALUE ;; + *) + esac +done + +echo "start: read appid and appsecret" +# used the same SP which used for acr +CLIENT_ID=$(cat ~/acrappid) +CLIENT_SECRET=$(cat ~/acrappsecret) +echo "end: read appid and appsecret" + +echo "Service Principal CLIENT_ID:$CLIENT_ID" +echo "replace CLIENT_ID value" +sed -i "s=SP_CLIENT_ID_VALUE=$CLIENT_ID=g" e2e-tests.yaml + +# only uncomment for debug purpose +# echo "Service Principal CLIENT_SECRET:$CLIENT_SECRET" +echo "replace CLIENT_SECRET value" +sed -i "s=SP_CLIENT_SECRET_VALUE=$CLIENT_SECRET=g" e2e-tests.yaml + +echo "Service Principal TENANT_ID:$TENANT_ID" +echo "replace TENANT_ID value" +sed -i "s=SP_TENANT_ID_VALUE=$TENANT_ID=g" e2e-tests.yaml + +echo "end: update placeholders of e2e-tests.yaml." diff --git a/.pipelines/validate-e2e-tests-results.sh b/.pipelines/validate-e2e-tests-results.sh new file mode 100644 index 000000000..c38fa0f50 --- /dev/null +++ b/.pipelines/validate-e2e-tests-results.sh @@ -0,0 +1,71 @@ +#!/bin/bash +echo "start: validating results of e2e-tests ..." 
+DEFAULT_SONOBUOY_VERSION="0.20.0" +DEFAULT_TIME_OUT_IN_MINS=60 +for ARGUMENT in "$@" +do + KEY=$(echo $ARGUMENT | cut -f1 -d=) + VALUE=$(echo $ARGUMENT | cut -f2 -d=) + + case "$KEY" in + SONOBUOY_VERSION) SONOBUOY_VERSION=$VALUE ;; + *) + esac +done + +if [ -z $SONOBUOY_VERSION ]; then + SONOBUOY_VERSION=$DEFAULT_SONOBUOY_VERSION +fi + +echo "sonobuoy version: ${SONOBUOY_VERSION}" + +echo "start: downloading sonobuoy" +curl -LO https://github.com/vmware-tanzu/sonobuoy/releases/download/v${SONOBUOY_VERSION}/sonobuoy_${SONOBUOY_VERSION}_linux_amd64.tar.gz +echo "end: downloading sonobuoy" + +echo "start: extract sonobuoy tar file" +mkdir -p sonobuoy-install/ +tar -zxf sonobuoy_${SONOBUOY_VERSION}_*.tar.gz -C sonobuoy-install/ +echo "end: extract sonobuoy tar file" + +echo "start: move sonobuoy binaries to /usr/local/bin/" +mv -f sonobuoy-install/sonobuoy /usr/local/bin/ +echo "end: move sonobuoy binaries to /usr/local/bin/" + +rm -rf sonobuoy_${SONOBUOY_VERSION}_*.tar.gz sonobuoy-install/ + +results=$(sonobuoy retrieve) +mins=0 +IsSucceeded=true +while [ $mins -le $DEFAULT_TIME_OUT_IN_MINS ] +do + # check the status + echo "checking test status" + status=$(sonobuoy status) + status=$(echo $status | sed 's/`//g') + if [[ $status == *"completed"* ]]; then + echo "test run completed" + mins=$DEFAULT_TIME_OUT_IN_MINS + if [[ $status == *"failed"* ]]; then + IsSucceeded=false + fi + else + echo "sleep for 1m to check the status again" + sleep 1m + fi + mins=$(( $mins + 1 )) +done +echo "status:${IsSucceeded}" + +results=$(sonobuoy retrieve) +sonobuoy results $results + +if $IsSucceeded == true; then + echo "all test passed" + exit 0 +else + echo "tests are failed. please review the results by downloading tar file via sonobuoy retrieve command" + exit 1 +fi + +echo "end: validating results of e2e-tests ..." diff --git a/README.md b/README.md index 3eec1f344..3564345ee 100644 --- a/README.md +++ b/README.md @@ -91,6 +91,7 @@ The general directory structure is: │ │ | ... 
- plugins in, out and filters code in ruby │ ├── toml-parser/ - code for parsing of toml configuration files ├── test/ - source code for tests +│ ├── e2e/ - e2e tests to validate agent and e2e workflow(s) │ ├── unit-tests/ - unit tests code │ ├── scenario/ - scenario tests code ├── !_README.md - this file @@ -271,6 +272,36 @@ For DEV and PROD branches, automatically deployed latest yaml with latest agent # E2E Tests +## For executing tests + +1. Deploy the omsagent.yaml with your agent image. In the yaml, make sure `ISTEST` environment variable set to `true` if its not set already +2. Update the Service Principal CLIENT_ID, CLIENT_SECRET and TENANT_ID placeholder values and apply e2e-tests.yaml to execute the tests + > Note: Service Principal requires reader role on log analytics workspace and cluster resource to query LA and metrics + ``` + cd ~/Docker-Provider/test/e2e # based on your repo path + kubectl apply -f e2e-tests.yaml # this will trigger job to run the tests in sonobuoy namespace + kubectl get po -n sonobuoy # to check the pods and jobs associated to tests + ``` +3. Download (sonobuoy)[https://github.com/vmware-tanzu/sonobuoy/releases] on your dev box to view the results of the tests + ``` + results=$(sonobuoy retrieve) # downloads tar file which has logs and test results + sonobuoy results $results # get the summary of the results + tar -xzvf # extract downloaded tar file and look for pod logs, results and other k8s resources if there are any failures + ``` + +## For adding new tests + +1. Add the test python file with your test code under `tests` directory +2. Build the docker image, recommended to use ACR & MCR + ``` + cd ~/Docker-Provider/test/e2e/src # based on your repo path + docker login -u -p # login to acr + docker build -f ./core/Dockerfile -t /: . + docker push /: + ``` +3. 
update existing agentest image tag in e2e-tests.yaml with newly built image tag with MCR repo + +# Scenario Tests Clusters are used in release pipeline already has the yamls under test\scenario deployed. Make sure to validate these scenarios. If you have new interesting scenarios, please add/update them. diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 2afe16481..80d6f188d 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -10,6 +10,20 @@ additional questions or comments. ## Release History Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 02/23/2021 - +##### Version microsoft/oms:ciprod02232021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021 (linux) +##### Version microsoft/oms:win-ciprod02232021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod02232021 (windows) +##### Code change log +- ContainerLogV2 schema support for LogAnalytics & ADX (not usable externally yet) +- Fix nodemetrics (cpuusageprecentage & memoryusagepercentage) metrics not flowing. This is fixed upstream for k8s versions >= 1.19.7 and >=1.20.2. 
+- Fix cpu & memory usage exceeded threshold container metrics not flowing when requests and/or limits were not set +- Mute some unused exceptions from going to telemetry +- Collect containerimage (repository, image & imagetag) from spec (instead of runtime) +- Add support for extension MSI for k8s arc +- Use cloud specific instrumentation keys for telemetry +- Picked up newer version for apt +- Add priority class to daemonset (in our chart only) + ### 01/11/2021 - ##### Version microsoft/oms:ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021 (linux) ##### Version microsoft/oms:win-ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021 (windows) @@ -27,68 +41,6 @@ Note : The agent version(s) below has dates (ciprod), which indicate t - Enable ADX route for windows container logs - Remove logging to termination log in windows agent liveness probe - -### 11/09/2020 - -##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020 (linux) -##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020 (windows) -##### Code change log -- Fix for duplicate windows metrics - -### 10/27/2020 - -##### Version microsoft/oms:ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020 (linux) -##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) -##### Code change log -- Activate oneagent in few AKS regions (koreacentral,norwayeast) -- Disable syslog -- Fix timeout for Windows daemonset liveness probe -- Make request == limit for Windows daemonset resources (cpu & memory) -- Schema v2 for container log (ADX only - applicable only for select customers for piloting) - -### 10/05/2020 - -##### Version microsoft/oms:ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020 (linux) -##### 
Version microsoft/oms:win-ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) -##### Code change log -- Health CRD to version v1 (from v1beta1) for k8s versions >= 1.19.0 -- Collection of PV usage metrics for PVs mounted by pods (kube-system pods excluded by default)(doc-link-needed) -- Zero fill few custom metrics under a timer, also add zero filling for new PV usage metrics -- Collection of additional Kubelet metrics ('kubelet_running_pod_count','volume_manager_total_volumes','kubelet_node_config_error','process_resident_memory_bytes','process_cpu_seconds_total','kubelet_runtime_operations_total','kubelet_runtime_operations_errors_total'). This also includes updates to 'kubelet' workbook to include these new metrics -- Collection of Azure NPM (Network Policy Manager) metrics (basic & advanced. By default, NPM metrics collection is turned OFF)(doc-link-needed) -- Support log collection when docker root is changed with knode. Tracked by [this](https://github.com/Azure/AKS/issues/1373) issue -- Support for Pods in 'Terminating' state for nodelost scenarios -- Fix for reduction in telemetry for custom metrics ingestion failures -- Fix CPU capacity/limits metrics being 0 for Virtual nodes (VK) -- Add new custom metric regions (eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth) -- Enable strict SSL validation for AppInsights Ruby SDK -- Turn off custom metrics upload for unsupported cluster types -- Install CA certs from wire server for windows (in certain clouds) - -### 09/16/2020 - -> Note: This agent release targetted ONLY for non-AKS clusters via Azure Monitor for containers HELM chart update -##### Version microsoft/oms:ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020 (linux) -##### Version microsoft/oms:win-ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod09162020 (windows) 
-##### Code change log -- Collection of Azure Network Policy Manager Basic and Advanced metrics -- Add support in Windows Agent for Container log collection of CRI runtimes such as ContainerD -- Alertable metrics support Arc K8s cluster to parity with AKS -- Support for multiple container log mount paths when docker is updated through knode -- Bug fix related to MDM telemetry - -### 08/07/2020 - -##### Version microsoft/oms:ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020 (linux) -##### Version microsoft/oms:win-ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020 (windows) -##### Code change log -- Collection of KubeState metrics for deployments and HPA -- Add the Proxy support for Windows agent -- Fix for ContainerState in ContainerInventory to handle Failed state and collection of environment variables for terminated and failed containers -- Change /spec to /metrics/cadvisor endpoint to collect node capacity metrics -- Disable Health Plugin by default and can enabled via configmap -- Pin version of jq to 1.5+dfsg-2 -- Bug fix for showing node as 'not ready' when there is disk pressure -- oneagent integration (disabled by default) -- Add region check before sending alertable metrics to MDM -- Telemetry fix for agent telemetry for sov. 
clouds - - ### 11/09/2020 - ##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020 (linux) ##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020 (windows) @@ -97,7 +49,7 @@ Note : The agent version(s) below has dates (ciprod), which indicate t ### 10/27/2020 - ##### Version microsoft/oms:ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020 (linux) -##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) +##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10272020 (windows) ##### Code change log - Activate oneagent in few AKS regions (koreacentral,norwayeast) - Disable syslog diff --git a/build/common/installer/scripts/tomlparser.rb b/build/common/installer/scripts/tomlparser.rb index fe26f639e..a0f3c2f0a 100644 --- a/build/common/installer/scripts/tomlparser.rb +++ b/build/common/installer/scripts/tomlparser.rb @@ -23,6 +23,7 @@ @logExclusionRegexPattern = "(^((?!stdout|stderr).)*$)" @excludePath = "*.csv2" #some invalid path @enrichContainerLogs = false +@containerLogSchemaVersion = "" @collectAllKubeEvents = false @containerLogsRoute = "" @@ -138,6 +139,16 @@ def populateSettingValuesFromConfigMap(parsedConfig) ConfigParseErrorLogger.logError("Exception while reading config map settings for cluster level container log enrichment - #{errorStr}, using defaults, please check config map for errors") end + #Get container log schema version setting + begin + if !parsedConfig[:log_collection_settings][:schema].nil? && !parsedConfig[:log_collection_settings][:schema][:containerlog_schema_version].nil? 
+ @containerLogSchemaVersion = parsedConfig[:log_collection_settings][:schema][:containerlog_schema_version] + puts "config::Using config map setting for container log schema version" + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while reading config map settings for container log schema version - #{errorStr}, using defaults, please check config map for errors") + end + #Get kube events enrichment setting begin if !parsedConfig[:log_collection_settings][:collect_all_kube_events].nil? && !parsedConfig[:log_collection_settings][:collect_all_kube_events][:enabled].nil? @@ -200,6 +211,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export AZMON_CLUSTER_CONTAINER_LOG_ENRICH=#{@enrichContainerLogs}\n") file.write("export AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS=#{@collectAllKubeEvents}\n") file.write("export AZMON_CONTAINER_LOGS_ROUTE=#{@containerLogsRoute}\n") + file.write("export AZMON_CONTAINER_LOG_SCHEMA_VERSION=#{@containerLogSchemaVersion}\n") # Close file after writing all environment variables file.close puts "Both stdout & stderr log collection are turned off for namespaces: '#{@excludePath}' " @@ -246,6 +258,8 @@ def get_command_windows(env_variable_name, env_variable_value) file.write(commands) commands = get_command_windows('AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE', @containerLogsRoute) file.write(commands) + commands = get_command_windows('AZMON_CONTAINER_LOG_SCHEMA_VERSION', @containerLogSchemaVersion) + file.write(commands) # Close file after writing all environment variables file.close diff --git a/build/version b/build/version index 711a96921..2da3efa39 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=12 +CONTAINER_BUILDVERSION_MAJOR=13 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20210111 +CONTAINER_BUILDVERSION_DATE=20210223 
CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index a809a4e69..ce64fd1ce 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.8.0 +version: 2.8.1 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/README.md b/charts/azuremonitor-containers/README.md index 469fac94a..a3f17b509 100644 --- a/charts/azuremonitor-containers/README.md +++ b/charts/azuremonitor-containers/README.md @@ -93,6 +93,7 @@ The following table lists the configurable parameters of the MSOMS chart and the | `omsagent.env.clusterName` | Name of your cluster | Does not have a default value, needs to be provided | | `omsagent.rbac` | rbac enabled/disabled | true (i.e.enabled) | | `omsagent.proxy` | Proxy endpoint | Doesnt have default value. Refer to [configure proxy](#Configuring-Proxy-Endpoint) | +| `omsagent.priority` | DaemonSet Pod Priority | This is the [priority](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) to use for the daemonsets such that they get scheduled onto the node ahead of "normal" pods - must be an integer, defaults to 10 | > Note: For Azure Manage K8s clusters such as Azure Arc K8s and ARO v4, `omsagent.env.clusterId` with fully qualified azure resource id of the cluster should be used instead of `omsagent.env.clusterName` @@ -100,6 +101,7 @@ The following table lists the configurable parameters of the MSOMS chart and the - Parameter `omsagent.env.doNotCollectKubeSystemLogs` has been removed starting chart version 1.0.0. 
Refer to 'Agent data collection settings' section below to configure it using configmap. - onboarding of multiple clusters with the same cluster name to same log analytics workspace not supported. If need this configuration, use the cluster FQDN name rather than cluster dns prefix to avoid collision with clusterName +- The `omsagent.priority` parameter sets the priority of the omsagent daemonset priority class. This pod priority class is used for daemonsets to allow them to have priority over pods that can be scheduled elsewhere. Without a priority class, it is possible for a node to fill up with "normal" pods before the daemonset pods get to be created for the node or get scheduled. Note that pods are not "daemonset" pods - they are just pods created by the daemonset controller but they have a specific affinity set during creation to the specific node each pod was created to run on. You want this value to be greater than 0 (default is 10) and generally greater than pods that have the flexibility to run on different nodes such that they do not block the node specific pods. 
## Agent data collection settings diff --git a/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml index ebdd5ea3f..b7482b8b5 100644 --- a/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-arc-k8s-crd.yaml @@ -1,4 +1,18 @@ {{- if or ( contains "microsoft.kubernetes/connectedclusters" (.Values.Azure.Cluster.ResourceId | lower) ) ( contains "microsoft.kubernetes/connectedclusters" (.Values.omsagent.env.clusterId | lower)) }} +#extension model +{{- if not (empty .Values.Azure.Extension.Name) }} +apiVersion: clusterconfig.azure.com/v1beta1 +kind: AzureExtensionIdentity +metadata: + name: {{ .Values.Azure.Extension.Name }} + namespace: azure-arc +spec: + serviceAccounts: + - name: omsagent + namespace: kube-system + tokenNamespace: azure-arc +--- +{{- end }} apiVersion: clusterconfig.azure.com/v1beta1 kind: AzureClusterIdentityRequest metadata: @@ -6,4 +20,7 @@ metadata: namespace: azure-arc spec: audience: https://monitoring.azure.com/ + {{- if not (empty .Values.Azure.Extension.Name) }} + resourceId: {{ .Values.Azure.Extension.Name }} + {{- end }} {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 81003c704..82d210f3d 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -27,10 +27,11 @@ spec: checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . 
| sha256sum }} checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} spec: - dnsConfig: + priorityClassName: omsagent + dnsConfig: options: - name: ndots - value: "3" + value: "3" {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} nodeSelector: kubernetes.io/os: windows diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 3d29ede42..615cd0485 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -28,10 +28,11 @@ spec: checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }} spec: - dnsConfig: + priorityClassName: omsagent + dnsConfig: options: - name: ndots - value: "3" + value: "3" {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent {{- end }} @@ -70,6 +71,10 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + {{- if not (empty .Values.Azure.Extension.Name) }} + - name: ARC_K8S_EXTENSION_NAME + value: {{ .Values.Azure.Extension.Name | quote }} + {{- end }} - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" {{- if .Values.omsagent.logsettings.logflushintervalsecs }} @@ -84,6 +89,8 @@ spec: - name: FBIT_TAIL_BUFFER_MAX_SIZE value: {{ .Values.omsagent.logsettings.tailbufmaxsizemegabytes | quote }} {{- end }} + - name: ISTEST + value: {{ .Values.omsagent.ISTEST | quote }} securityContext: privileged: true ports: diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 8609d25c9..012dd2720 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -67,8 +67,14 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + {{- if not (empty 
.Values.Azure.Extension.Name) }} + - name: ARC_K8S_EXTENSION_NAME + value: {{ .Values.Azure.Extension.Name | quote }} + {{- end }} - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "" + - name: ISTEST + value: {{ .Values.omsagent.ISTEST | quote }} securityContext: privileged: true ports: diff --git a/charts/azuremonitor-containers/templates/omsagent-priorityclass.yaml b/charts/azuremonitor-containers/templates/omsagent-priorityclass.yaml new file mode 100644 index 000000000..4d9980ab3 --- /dev/null +++ b/charts/azuremonitor-containers/templates/omsagent-priorityclass.yaml @@ -0,0 +1,22 @@ +{{- if and (ne .Values.omsagent.secret.key "") (ne .Values.omsagent.secret.wsid "") (or (ne .Values.omsagent.env.clusterName "") (ne .Values.omsagent.env.clusterId "") (ne .Values.Azure.Cluster.ResourceId "") )}} +# This pod priority class is used for daemonsets to allow them to have priority +# over pods that can be scheduled elsewhere. Without a priority class, it is +# possible for a node to fill up with pods before the daemonset pods get to be +# created for the node or get scheduled. 
Note that pods are not "daemonset" +# pods - they are just pods created by the daemonset controller but they have +# a specific affinity set during creation to the specific node each pod was +# created to run on (daemonset controller takes care of that) +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: omsagent + # Priority classes don't have labels :-) + annotations: + chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: oms-agent +value: {{ .Values.omsagent.priority }} +globalDefault: false +description: "This is the daemonset priority class for omsagent" +{{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index bd4e9baf3..5db5c2dab 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -33,10 +33,14 @@ rules: verbs: ["get", "create", "patch"] - nonResourceURLs: ["/metrics"] verbs: ["get"] +#arc k8s extension model grants access as part of the extension msi +#remove this explicit permission once the extension available in public preview +{{- if (empty .Values.Azure.Extension.Name) }} - apiGroups: [""] resources: ["secrets"] resourceNames: ["container-insights-clusteridentityrequest-token"] verbs: ["get"] +{{- end }} --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 52e80dff8..afe789d56 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -4,19 +4,40 @@ ## Microsoft OMS Agent image for kubernetes cluster monitoring ## ref: https://github.com/microsoft/Docker-Provider/tree/ci_prod -## Values of ResourceId and Region under Azure->Cluster being populated by Azure Arc K8s RP during the 
installation of the extension +## Values of under Azure are being populated by Azure Arc K8s RP during the installation of the extension Azure: Cluster: Region: ResourceId: + Extension: + Name: "" + ResourceId: "" omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod01112021" - tagWindows: "win-ciprod01112021" + tag: "ciprod02232021" + tagWindows: "win-ciprod02232021" pullPolicy: IfNotPresent - dockerProviderVersion: "12.0.0-0" + dockerProviderVersion: "13.0.0-0" agentVersion: "1.10.0.1" + + # The priority used by the omsagent priority class for the daemonset pods + # Note that this is not execution piority - it is scheduling priority, as + # in getting scheduled to the node. This needs to be greater than 0 such + # that the daemonset pods, which can not schedule onto different nodes as + # they are defined to run on specific nodes, are not accidentally frozen + # out of a node due to other pods showing up earlier in scheduling. + # (DaemonSet pods by definition only are created once the node exists for + # them to be created for and thus it is possible to have "normal" pods + # already in line to run on the node before the DeamonSet controller got a + # chance to build pod for the node and give it to the scheduler) + # Should be some number greater than default (0) + priority: 10 + + # This used for running agent pods in test mode. + # if set to true additional agent workflow logs will be emitted which are used for e2e and arc k8s conformance testing + ISTEST: false + ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. 
@@ -82,6 +103,21 @@ omsagent: operator: NotIn values: - virtual-kubelet + nodeSelectorTerms: + - labelSelector: + matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/arch + operator: In + values: + - amd64 nodeSelectorTerms: - labelSelector: matchExpressions: @@ -93,10 +129,10 @@ omsagent: operator: NotIn values: - virtual-kubelet - - key: beta.kubernetes.io/arch + - key: beta.kubernetes.io/arch operator: In values: - - amd64 + - amd64 deployment: affinity: nodeAffinity: @@ -125,10 +161,10 @@ omsagent: operator: NotIn values: - master - - key: kubernetes.io/arch + - key: kubernetes.io/arch operator: In values: - - amd64 + - amd64 nodeSelectorTerms: - labelSelector: matchExpressions: @@ -144,10 +180,10 @@ omsagent: operator: NotIn values: - master - - key: beta.kubernetes.io/arch + - key: beta.kubernetes.io/arch operator: In values: - - amd64 + - amd64 ## Configure resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 2e1118922..bee718a31 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod01112021 +ARG IMAGE_TAG=ciprod02232021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh b/kubernetes/linux/dockerbuild/build-and-publish-docker-image.sh old mode 100644 new mode 100755 diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 2f88c07ac..c4067f25e 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -161,6 +161,39 @@ fi export 
CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT echo "export CLOUD_ENVIRONMENT=$CLOUD_ENVIRONMENT" >> ~/.bashrc +# Check if the instrumentation key needs to be fetched from a storage account (as in airgapped clouds) +if [ ${#APPLICATIONINSIGHTS_AUTH_URL} -ge 1 ]; then # (check if APPLICATIONINSIGHTS_AUTH_URL has length >=1) + for BACKOFF in {1..4}; do + KEY=$(curl -sS $APPLICATIONINSIGHTS_AUTH_URL ) + # there's no easy way to get the HTTP status code from curl, so just check if the result is well formatted + if [[ $KEY =~ ^[A-Za-z0-9=]+$ ]]; then + break + else + sleep $((2**$BACKOFF / 4)) # (exponential backoff) + fi + done + + # validate that the retrieved data is an instrumentation key + if [[ $KEY =~ ^[A-Za-z0-9=]+$ ]]; then + export APPLICATIONINSIGHTS_AUTH=$(echo $KEY) + echo "export APPLICATIONINSIGHTS_AUTH=$APPLICATIONINSIGHTS_AUTH" >> ~/.bashrc + echo "Using cloud-specific instrumentation key" + else + # no ikey can be retrieved. Disable telemetry and continue + export DISABLE_TELEMETRY=true + echo "export DISABLE_TELEMETRY=true" >> ~/.bashrc + echo "Could not get cloud-specific instrumentation key (network error?). Disabling telemetry" + fi +fi + + +aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) +export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey +echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc + +source ~/.bashrc + + #Parse the configmap to set the right environment variables. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb @@ -190,15 +223,6 @@ cat integration_npm_config_env_var | while read line; do done source integration_npm_config_env_var -#Parse the configmap to set the right environment variables for network policy manager (npm) integration. 
-/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb - -cat integration_npm_config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source integration_npm_config_env_var - #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset if [ ! -e "/etc/config/kube.conf" ]; then /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb @@ -521,6 +545,7 @@ if [ ! -e "/etc/config/kube.conf" ]; then echo "starting mdsd ..." mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 fi fi @@ -589,11 +614,6 @@ echo "export HOST_ETC=/hostfs/etc" >> ~/.bashrc export HOST_VAR=/hostfs/var echo "export HOST_VAR=/hostfs/var" >> ~/.bashrc -aikey=$(echo $APPLICATIONINSIGHTS_AUTH | base64 --decode) -export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey -echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc - -source ~/.bashrc #start telegraf /opt/telegraf --config $telegrafConfFile & diff --git a/kubernetes/linux/mdsd.xml b/kubernetes/linux/mdsd.xml index 76d2104fc..49d329791 100644 --- a/kubernetes/linux/mdsd.xml +++ b/kubernetes/linux/mdsd.xml @@ -48,20 +48,31 @@ --> - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + - + + @@ -97,15 +108,22 @@ priority events to be delivered sooner than the next five-minute interval. --> - - - - + + + + + + + + + - @@ -118,7 +136,16 @@ - ]]> + ]]> + + + + + + + + + ]]> diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 352be06d7..fe6c0565a 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -2,8 +2,8 @@ TMPDIR="/opt" cd $TMPDIR #Download utf-8 encoding capability on the omsagent container. 
- -apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y locales +#upgrade apt to latest version +apt-get update && apt-get install -y apt && DEBIAN_FRONTEND=noninteractive apt-get install -y locales sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure --frontend=noninteractive locales && \ diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 67bd9cdde..ebf0257af 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "12.0.0-0" + dockerProviderVersion: "13.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021" imagePullPolicy: IfNotPresent resources: limits: @@ -383,6 +383,9 @@ spec: value: "VALUE_AKS_RESOURCE_ID_VALUE" - name: AKS_REGION value: "VALUE_AKS_RESOURCE_REGION_VALUE" + # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests + - name: ISTEST + value: "true" #Uncomment below two lines for ACS clusters and set the cluster names manually. 
Also comment out the above two lines for ACS clusters #- name: ACS_RESOURCE_NAME # value: "my_acs_cluster_name" @@ -521,13 +524,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "12.0.0-0" + dockerProviderVersion: "13.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021" imagePullPolicy: IfNotPresent resources: limits: @@ -541,6 +544,9 @@ spec: value: "VALUE_AKS_RESOURCE_ID_VALUE" - name: AKS_REGION value: "VALUE_AKS_RESOURCE_REGION_VALUE" + # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests + - name: ISTEST + value: "true" # Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters #- name: ACS_RESOURCE_NAME # value: "my_acs_cluster_name" @@ -675,7 +681,7 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "12.0.0-0" + dockerProviderVersion: "13.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -685,7 +691,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod02232021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index f852bd236..d4f118449 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod01112021 +ARG IMAGE_TAG=win-ciprod02232021 # Do not split this into multiple RUN! 
# Docker creates a layer for every RUN-Statement diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index a297e3801..722392157 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -119,10 +119,48 @@ function Set-EnvironmentVariables { $env:AZMON_AGENT_CFG_SCHEMA_VERSION } - # Set environment variable for TELEMETRY_APPLICATIONINSIGHTS_KEY - $aiKey = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($env:APPLICATIONINSIGHTS_AUTH)) - [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKey, "Process") - [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKey, "Machine") + # Check if the instrumentation key needs to be fetched from a storage account (as in airgapped clouds) + $aiKeyURl = [System.Environment]::GetEnvironmentVariable('APPLICATIONINSIGHTS_AUTH_URL') + if ($aiKeyURl) { + $aiKeyFetched = "" + # retry up to 5 times + for( $i = 1; $i -le 4; $i++) { + try { + $response = Invoke-WebRequest -uri $aiKeyURl -UseBasicParsing -TimeoutSec 5 -ErrorAction:Stop + + if ($response.StatusCode -ne 200) { + Write-Host "Expecting reponse code 200, was: $($response.StatusCode), retrying" + Start-Sleep -Seconds ([MATH]::Pow(2, $i) / 4) + } + else { + $aiKeyFetched = $response.Content + break + } + } + catch { + Write-Host "Exception encountered fetching instrumentation key:" + Write-Host $_.Exception + } + } + + # Check if the fetched IKey was properly encoded. if not then turn off telemetry + if ($aiKeyFetched -match '^[A-Za-z0-9=]+$') { + Write-Host "Using cloud-specific instrumentation key" + [System.Environment]::SetEnvironmentVariable("APPLICATIONINSIGHTS_AUTH", $aiKeyFetched, "Process") + [System.Environment]::SetEnvironmentVariable("APPLICATIONINSIGHTS_AUTH", $aiKeyFetched, "Machine") + } + else { + # Couldn't fetch the Ikey, turn telemetry off + Write-Host "Could not get cloud-specific instrumentation key (network error?). 
Disabling telemetry" + [System.Environment]::SetEnvironmentVariable("DISABLE_TELEMETRY", "True", "Process") + [System.Environment]::SetEnvironmentVariable("DISABLE_TELEMETRY", "True", "Machine") + } + } + + $aiKeyDecoded = [System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($env:APPLICATIONINSIGHTS_AUTH)) + [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKeyDecoded, "Process") + [System.Environment]::SetEnvironmentVariable("TELEMETRY_APPLICATIONINSIGHTS_KEY", $aiKeyDecoded, "Machine") + # run config parser ruby /opt/omsagentwindows/scripts/ruby/tomlparser.rb @@ -324,7 +362,3 @@ Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | For #check if fluentd service is running Get-Service fluentdwinaks - - - - diff --git a/scripts/onboarding/aks/onboarding-using-azure-policy/azure-policy.json b/scripts/onboarding/aks/onboarding-using-azure-policy/azure-policy.json new file mode 100644 index 000000000..c68bfed17 --- /dev/null +++ b/scripts/onboarding/aks/onboarding-using-azure-policy/azure-policy.json @@ -0,0 +1,113 @@ +{ + "mode": "Indexed", + "policyRule": { + "if": { + "field": "type", + "equals": "Microsoft.ContainerService/managedClusters" + }, + "then": { + "effect": "deployIfNotExists", + "details": { + "type": "Microsoft.ContainerService/managedClusters", + "name": "[field('name')]", + "roleDefinitionIds": [ + "/providers/Microsoft.Authorization/roleDefinitions/ed7f3fbd-7b88-4dd4-9017-9adb7ce333f8", + "/providers/Microsoft.Authorization/roleDefinitions/92aaf0da-9dab-42b6-94a3-d43ce8d16293" + ], + "existenceCondition": { + "field": "Microsoft.ContainerService/managedClusters/addonProfiles.omsagent.enabled", + "equals": "true" + }, + "deployment": { + "properties": { + "mode": "incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterName": { + "type": 
"string" + }, + "clusterResourceGroupName": { + "type": "string" + }, + "clusterLocation": { + "type": "string" + }, + "clusterTags": { + "type": "object" + }, + "workspaceResourceId": { + "type": "string" + } + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('aks-monitoring-policy', '-', uniqueString(parameters('clusterName')))]", + "apiVersion": "2019-05-01", + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "name": "[parameters('clusterName')]", + "type": "Microsoft.ContainerService/managedClusters", + "location": "[parameters('clusterLocation')]", + "tags": "[parameters('clusterTags')]", + "apiVersion": "2018-03-31", + "properties": { + "mode": "Incremental", + "id": "[resourceId(parameters('clusterResourceGroupName'), 'Microsoft.ContainerService/managedClusters', parameters('clusterName'))]", + "addonProfiles": { + "omsagent": { + "enabled": true, + "config": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]" + } + } + } + } + } + ] + } + } + } + ] + }, + "parameters": { + "clusterName": { + "value": "[field('name')]" + }, + "clusterResourceGroupName": { + "value": "[resourceGroup().name]" + }, + "clusterLocation": { + "value": "[field('location')]" + }, + "clusterTags": { + "value": "[field('tags')]" + }, + "workspaceResourceId": { + "value": "[parameters('workspaceResourceId')]" + } + } + } + } + } + } + }, + "parameters": { + "workspaceResourceId": { + "type": "String", + "metadata": { + "displayName": "Resource Id of the existing Azure Log Analytics Workspace", + "description": "Azure Monitor Log Analytics Resource ID" + } + } + } +} diff --git a/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.parameters.json 
b/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.parameters.json new file mode 100644 index 000000000..6281cdade --- /dev/null +++ b/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.parameters.json @@ -0,0 +1,9 @@ +{ + "workspaceResourceId": { + "type": "string", + "metadata": { + "displayName": "Resource Id of the existing Azure Log Analytics Workspace", + "description": "Azure Monitor Log Analytics Resource ID" + } + } +} diff --git a/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.rules.json b/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.rules.json new file mode 100644 index 000000000..a113441ce --- /dev/null +++ b/scripts/onboarding/aks/onboarding-using-azure-policy/azurepolicy.rules.json @@ -0,0 +1,101 @@ +{ + "if": { + "field": "type", + "equals": "Microsoft.ContainerService/managedClusters" + }, + "then": { + "effect": "deployIfNotExists", + "details": { + "type": "Microsoft.ContainerService/managedClusters", + "name": "[field('name')]", + "roleDefinitionIds": [ + "/providers/Microsoft.Authorization/roleDefinitions/ed7f3fbd-7b88-4dd4-9017-9adb7ce333f8", + "/providers/Microsoft.Authorization/roleDefinitions/92aaf0da-9dab-42b6-94a3-d43ce8d16293" + ], + "existenceCondition": { + "field": "Microsoft.ContainerService/managedClusters/addonProfiles.omsagent.enabled", + "equals": "true" + }, + "deployment": { + "properties": { + "mode": "incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterName": { + "type": "string" + }, + "clusterResourceGroupName": { + "type": "string" + }, + "clusterLocation": { + "type": "string" + }, + "clusterTags": { + "type": "object" + }, + "workspaceResourceId": { + "type": "string" + } + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('aks-monitoring-policy', '-', 
uniqueString(parameters('clusterName')))]", + "apiVersion": "2019-05-01", + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "name": "[parameters('clusterName')]", + "type": "Microsoft.ContainerService/managedClusters", + "location": "[parameters('clusterLocation')]", + "tags": "[parameters('clusterTags')]", + "apiVersion": "2018-03-31", + "properties": { + "mode": "Incremental", + "id": "[resourceId(parameters('clusterResourceGroupName'), 'Microsoft.ContainerService/managedClusters', parameters('clusterName'))]", + "addonProfiles": { + "omsagent": { + "enabled": true, + "config": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]" + } + } + } + } + } + ] + } + } + } + ] + }, + "parameters": { + "clusterName": { + "value": "[field('name')]" + }, + "clusterResourceGroupName": { + "value": "[resourceGroup().name]" + }, + "clusterLocation": { + "value": "[field('location')]" + }, + "clusterTags": { + "value": "[field('tags')]" + }, + "workspaceResourceId": { + "value": "[parameters('workspaceResourceId')]" + } + } + } + } + } + } +} diff --git a/scripts/onboarding/clusteruser/cluster-user-role-binding.yaml b/scripts/onboarding/clusteruser/cluster-user-role-binding.yaml new file mode 100644 index 000000000..fce2fc582 --- /dev/null +++ b/scripts/onboarding/clusteruser/cluster-user-role-binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: containerHealth-read-logs-global +roleRef: + kind: ClusterRole + name: containerHealth-log-reader + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: User + name: clusterUser + apiGroup: rbac.authorization.k8s.io diff --git a/scripts/onboarding/clusteruser/cluster-user-role.yaml b/scripts/onboarding/clusteruser/cluster-user-role.yaml new file mode 
100644 index 000000000..b3519fdd3 --- /dev/null +++ b/scripts/onboarding/clusteruser/cluster-user-role.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: containerHealth-log-reader +rules: + - apiGroups: ["", "metrics.k8s.io", "extensions", "apps"] + resources: + - "pods/log" + - "events" + - "nodes" + - "pods" + - "deployments" + - "replicasets" + verbs: ["get", "list"] diff --git a/scripts/onboarding/enable-monitoring-using-policy.md b/scripts/onboarding/enable-monitoring-using-policy.md new file mode 100644 index 000000000..e1e395ecc --- /dev/null +++ b/scripts/onboarding/enable-monitoring-using-policy.md @@ -0,0 +1,64 @@ +# How to enable AKS Monitoring Addon via Azure Policy +This doc describes how to enable AKS Monitoring Addon using Azure Custom Policy. Monitoring Addon Custom Policy can be assigned +either at subscription or resource group scope. If Azure Log Analytics workspace and AKS cluster are in different subscriptions then Managed Identity used by Policy assignment has to have required role permissions on both the subscriptions or at least on the resource of the Azure Log Analytics workspace. Similarly, if the policy is scoped to a Resource Group, then Managed Identity should have required role permissions on the Log Analytics workspace if the workspace is not in the selected Resource Group scope. + +Monitoring Addon requires the following roles on the Managed Identity used by Azure Policy + - [azure-kubernetes-service-contributor-role](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#azure-kubernetes-service-contributor-role) + - [log-analytics-contributor](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#log-analytics-contributor) + +## Create and Assign Policy definition using Azure Portal + +### Create Policy Definition + +1. 
Download the Azure Custom Policy definition to enable AKS Monitoring Addon +``` sh + curl -o azurepolicy.json -L https://aka.ms/aks-enable-monitoring-custom-policy +``` +2. Navigate to https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyMenuBlade/Definitions and create policy definition with the following details in the Policy definition create dialogue box + + - Pick any Azure Subscription where you want to store Policy Definition + - Name - '(Preview)AKS-Monitoring-Addon' + - Description - 'Azure Custom Policy to enable Monitoring Addon onto Azure Kubernetes Cluster(s) in specified scope' + - Category - Choose "use existing" and pick 'Kubernetes' from drop down + - Remove the existing sample rules and copy the contents of azurepolicy.json downloaded in step #1 above + +### Assign Policy Definition to Specified Scope + +> Note: Managed Identity will be created automatically and assigned specified roles in the Policy definition. + +3. Navigate to https://portal.azure.com/#blade/Microsoft_Azure_Policy/PolicyMenuBlade/Definitions and select the Policy Definition 'AKS Monitoring Addon' +4. Click an Assignment and select Scope, Exclusions (if any) +5. Provide the Resource Id of the Azure Log Analytics Workspace. The Resource Id should be in this format `/subscriptions//resourceGroups//providers/Microsoft.OperationalInsights/workspaces/` +6. Create a Remediation task in case you want to apply the policy to existing AKS clusters in selected scope +7. Click the Review & Create option to create Policy Assignment + +## Create and Assign Policy definition using Azure CLI + +### Create Policy Definition + +1. Download the Azure Custom Policy definition rules and parameters files + ``` sh + curl -o azurepolicy.rules.json -L https://aka.ms/aks-enable-monitoring-custom-policy-rules + curl -o azurepolicy.parameters.json -L https://aka.ms/aks-enable-monitoring-custom-policy-parameters + ``` +2. 
Create the policy definition using the command below + + ``` sh + az cloud set -n # set the Azure cloud + az login # login to cloud environment + az account set -s + az policy definition create --name "(Preview)AKS-Monitoring-Addon" --display-name "(Preview)AKS-Monitoring-Addon" --mode Indexed --metadata version=1.0.0 category=Kubernetes --rules azurepolicy.rules.json --params azurepolicy.parameters.json + ``` +### Assign Policy Definition to Specified Scope + +3. Create the policy assignment + +``` sh +az policy assignment create --name aks-monitoring-addon --policy "(Preview)AKS-Monitoring-Addon" --assign-identity --identity-scope /subscriptions/ --location --role Contributor --scope /subscriptions/ -p "{ \"workspaceResourceId\": { \"value\": \"/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/\" } }" +``` + +## References +- https://docs.microsoft.com/en-us/azure/governance/policy/ +- https://docs.microsoft.com/en-us/azure/governance/policy/how-to/remediate-resources#how-remediation-security-works +- https://docs.microsoft.com/en-us/cli/azure/install-azure-cli +- https://docs.microsoft.com/en-us/azure/azure-monitor/insights/container-insights-overview \ No newline at end of file diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 5e9cb8a25..d8ab7b345 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -50,8 +50,6 @@ param( [Parameter(mandatory = $false)] [string]$tenantId, [Parameter(mandatory = $false)] - [string]$kubeContext, - [Parameter(mandatory = $false)] [string]$azureCloudName ) @@ -66,7 +64,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.8.0" +$mcrChartVersion = "2.8.1" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "."
$omsAgentDomainName="opinsights.azure.com" diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 2dc0a465f..9d0c0aca5 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -44,7 +44,7 @@ defaultAzureCloud="AzureCloud" omsAgentDomainName="opinsights.azure.com" # released chart version in mcr -mcrChartVersion="2.8.0" +mcrChartVersion="2.8.1" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 8826b6df6..6d14dfa5f 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.8.0" +mcrChartVersion="2.8.1" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" diff --git a/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json new file mode 100644 index 000000000..8ebef232a --- /dev/null +++ b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json @@ -0,0 +1,135 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterResourceId": { + "type": "string", + "metadata": { + "description": "Resource Id of the Azure Arc Connected Cluster" + } + }, + "clusterRegion": { + "type": "string", + "metadata": { + "description": "Location of the Azure Arc Connected Cluster Resource e.g. 
\"eastus\"" + } + }, + "proxyEndpointUrl": { + "type": "string", + "defaultValue": "", + "metadata": { + "description": "If the cluster behind forward proxy, then specify Proxy Endpoint URL in this format: http(s)://:@:" + } + }, + "workspaceResourceId": { + "type": "string", + "metadata": { + "description": "Azure Monitor Log Analytics Resource ID" + } + }, + "workspaceRegion": { + "type": "string", + "metadata": { + "description": "Azure Monitor Log Analytics Workspace region e.g. \"eastus\"" + } + }, + "workspaceDomain": { + "type": "string", + "allowedValues": [ + "opinsights.azure.com", + "opinsights.azure.cn", + "opinsights.azure.us", + "opinsights.azure.eaglex.ic.gov", + "opinsights.azure.microsoft.scloud" + ], + "defaultValue": "opinsights.azure.com", + "metadata": { + "description": "Azure Monitor Log Analytics Workspace Domain e.g. opinsights.azure.com" + } + } + }, + "resources": [ + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('ContainerInsights', '-', uniqueString(parameters('workspaceResourceId')))]", + "apiVersion": "2017-05-10", + "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]", + "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]", + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "apiVersion": "2015-11-01-preview", + "type": "Microsoft.OperationsManagement/solutions", + "location": "[parameters('workspaceRegion')]", + "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", + "properties": { + "workspaceResourceId": "[parameters('workspaceResourceId')]" + }, + "plan": { + "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]", + "product": "[Concat('OMSGallery/', 'ContainerInsights')]", + "promotionCode": "", 
+ "publisher": "Microsoft" + } + } + ] + }, + "parameters": {} + } + }, + { + "type": "Microsoft.Resources/deployments", + "name": "[Concat('arc-k8s-ci-extension', '-', uniqueString(parameters('clusterResourceId')))]", + "apiVersion": "2019-05-01", + "subscriptionId": "[split(parameters('clusterResourceId'),'/')[2]]", + "resourceGroup": "[split(parameters('clusterResourceId'),'/')[4]]", + "dependsOn": [ + "[Concat('ContainerInsights', '-', uniqueString(parameters('workspaceResourceId')))]" + ], + "properties": { + "mode": "Incremental", + "template": { + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": {}, + "variables": {}, + "resources": [ + { + "type": "Microsoft.KubernetesConfiguration/extensions", + "apiVersion": "2020-07-01-preview", + "name": "azuremonitor-containers", + "location": "[parameters('clusterRegion')]", + "identity": {"type": "systemassigned"}, + "properties": { + "extensionType": "Microsoft.AzureMonitor.Containers", + "configurationSettings": { + "logAnalyticsWorkspaceResourceID": "[parameters('workspaceResourceId')]", + "omsagent.domain": "[parameters('workspaceDomain')]" + }, + "configurationProtectedSettings": { + "omsagent.secret.wsid": "[reference(parameters('workspaceResourceId'), '2015-03-20').customerId]", + "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" , + "omsagent.proxy": "[if(equals(parameters('proxyEndpointUrl'), ''), '', parameters('proxyEndpointUrl'))]" + }, + "autoUpgradeMinorVersion": true, + "releaseTrain": "Stable", + "scope": { + "Cluster": { + "releaseNamespace": "azuremonitor-containers" + } + } + }, + "scope": "[concat('Microsoft.Kubernetes/connectedClusters/', split(parameters('clusterResourceId'),'/')[8])]" + } + ] + } + } + } + ] +} diff --git a/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json 
b/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json new file mode 100644 index 000000000..b74b5ac95 --- /dev/null +++ b/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "clusterResourceId": { + "value": "/subscriptions//resourceGroups//providers/Microsoft.Kubernetes/connectedClusters/" + }, + "clusterRegion": { + "value": "" + }, + "proxyEndpointUrl": { + "value": "" + }, + "workspaceResourceId": { + "value": "/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/" + }, + "workspaceRegion": { + "value": "" + }, + "workspaceDomain": { + "value": "" + } + } +} diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 5a678781c..bda757eb8 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -32,13 +32,16 @@ import ( // DataType for Container Log const ContainerLogDataType = "CONTAINER_LOG_BLOB" +//DataType for Container Log v2 +const ContainerLogV2DataType = "CONTAINERINSIGHTS_CONTAINERLOGV2" + // DataType for Insights metric const InsightsMetricsDataType = "INSIGHTS_METRICS_BLOB" // DataType for KubeMonAgentEvent const KubeMonAgentEventDataType = "KUBE_MON_AGENT_EVENTS_BLOB" -//env varibale which has ResourceId for LA +//env variable which has ResourceId for LA const ResourceIdEnv = "AKS_RESOURCE_ID" //env variable which has ResourceName for NON-AKS @@ -78,20 +81,26 @@ const DaemonSetContainerLogPluginConfFilePath = "/etc/opt/microsoft/docker-cimpr const ReplicaSetContainerLogPluginConfFilePath = "/etc/opt/microsoft/docker-cimprov/out_oms.conf" const WindowsContainerLogPluginConfFilePath = "/etc/omsagentwindows/out_oms.conf" -// IPName for Container Log -const IPName = "Containers" +// IPName +const IPName = "ContainerInsights" + + const defaultContainerInventoryRefreshInterval = 
60 const kubeMonAgentConfigEventFlushInterval = 60 //Eventsource name in mdsd -const MdsdSourceName = "ContainerLogSource" +const MdsdContainerLogSourceName = "ContainerLogSource" +const MdsdContainerLogV2SourceName = "ContainerLogV2Source" -//container logs route - v2 (v2=flush to oneagent, adx= flush to adx ingestion, anything else flush to ODS[default]) +//container logs route (v2=flush to oneagent, adx= flush to adx ingestion, anything else flush to ODS[default]) const ContainerLogsV2Route = "v2" const ContainerLogsADXRoute = "adx" +//container logs schema (v2=ContainerLogsV2 table in LA, anything else ContainerLogs table in LA. This is applicable only if Container logs route is NOT ADX) +const ContainerLogV2SchemaVersion = "v2" + var ( // PluginConfiguration the plugins configuration PluginConfiguration map[string]string @@ -125,6 +134,8 @@ var ( ContainerLogsRouteV2 bool // container log route for routing thru ADX ContainerLogsRouteADX bool + // container log schema (applicable only for non-ADX route) + ContainerLogSchemaV2 bool //ADX Cluster URI AdxClusterUri string // ADX clientID @@ -180,8 +191,8 @@ var ( userAgent = "" ) -// DataItem represents the object corresponding to the json that is sent by fluentbit tail plugin -type DataItem struct { +// DataItemLAv1 == ContainerLog table in LA +type DataItemLAv1 struct { LogEntry string `json:"LogEntry"` LogEntrySource string `json:"LogEntrySource"` LogEntryTimeStamp string `json:"LogEntryTimeStamp"` @@ -193,10 +204,25 @@ type DataItem struct { Computer string `json:"Computer"` } +// DataItemLAv2 == ContainerLogV2 table in LA +// Please keep the names same as destination column names, to avoid transforming one to another in the pipeline +type DataItemLAv2 struct { + TimeGenerated string `json:"TimeGenerated"` + Computer string `json:"Computer"` + ContainerId string `json:"ContainerId"` + ContainerName string `json:"ContainerName"` + PodName string `json:"PodName"` + PodNamespace string `json:"PodNamespace"` + 
LogMessage string `json:"LogMessage"` + LogSource string `json:"LogSource"` + //PodLabels string `json:"PodLabels"` +} + +// DataItemADX == ContainerLogV2 table in ADX type DataItemADX struct { TimeGenerated string `json:"TimeGenerated"` Computer string `json:"Computer"` - ContainerID string `json:"ContainerID"` + ContainerId string `json:"ContainerId"` ContainerName string `json:"ContainerName"` PodName string `json:"PodName"` PodNamespace string `json:"PodNamespace"` @@ -227,10 +253,17 @@ type InsightsMetricsBlob struct { } // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point -type ContainerLogBlob struct { +type ContainerLogBlobLAv1 struct { + DataType string `json:"DataType"` + IPName string `json:"IPName"` + DataItems []DataItemLAv1 `json:"DataItems"` +} + +// ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point +type ContainerLogBlobLAv2 struct { DataType string `json:"DataType"` IPName string `json:"IPName"` - DataItems []DataItem `json:"DataItems"` + DataItems []DataItemLAv2 `json:"DataItems"` } // MsgPackEntry represents the object corresponding to a single messagepack event in the messagepack stream @@ -792,7 +825,8 @@ func UpdateNumTelegrafMetricsSentTelemetry(numMetricsSent int, numSendErrors int // PostDataHelper sends data to the ODS endpoint or oneagent or ADX func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { start := time.Now() - var dataItems []DataItem + var dataItemsLAv1 []DataItemLAv1 + var dataItemsLAv2 []DataItemLAv2 var dataItemsADX []DataItemADX var msgPackEntries []MsgPackEntry @@ -830,26 +864,42 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } stringMap = make(map[string]string) + //below id & name are used by latency telemetry in both v1 & v2 LA schemas + id := "" + name := "" logEntry := ToString(record["log"]) logEntryTimeStamp := ToString(record["time"]) - stringMap["LogEntry"] = 
logEntry - stringMap["LogEntrySource"] = logEntrySource - stringMap["LogEntryTimeStamp"] = logEntryTimeStamp - stringMap["SourceSystem"] = "Containers" - stringMap["Id"] = containerID - - if val, ok := imageIDMap[containerID]; ok { - stringMap["Image"] = val - } + //ADX Schema & LAv2 schema are almost the same (except resourceId) + if (ContainerLogSchemaV2 == true || ContainerLogsRouteADX == true) { + stringMap["Computer"] = Computer + stringMap["ContainerId"] = containerID + stringMap["ContainerName"] = containerName + stringMap["PodName"] = k8sPodName + stringMap["PodNamespace"] = k8sNamespace + stringMap["LogMessage"] = logEntry + stringMap["LogSource"] = logEntrySource + stringMap["TimeGenerated"] = logEntryTimeStamp + } else { + stringMap["LogEntry"] = logEntry + stringMap["LogEntrySource"] = logEntrySource + stringMap["LogEntryTimeStamp"] = logEntryTimeStamp + stringMap["SourceSystem"] = "Containers" + stringMap["Id"] = containerID + + if val, ok := imageIDMap[containerID]; ok { + stringMap["Image"] = val + } - if val, ok := nameIDMap[containerID]; ok { - stringMap["Name"] = val - } + if val, ok := nameIDMap[containerID]; ok { + stringMap["Name"] = val + } - stringMap["TimeOfCommand"] = start.Format(time.RFC3339) - stringMap["Computer"] = Computer - var dataItem DataItem + stringMap["TimeOfCommand"] = start.Format(time.RFC3339) + stringMap["Computer"] = Computer + } + var dataItemLAv1 DataItemLAv1 + var dataItemLAv2 DataItemLAv2 var dataItemADX DataItemADX var msgPackEntry MsgPackEntry @@ -866,50 +916,71 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } else if ContainerLogsRouteADX == true { if ResourceCentric == true { stringMap["AzureResourceId"] = ResourceID + } else { + stringMap["AzureResourceId"] = "" } stringMap["PodName"] = k8sPodName stringMap["PodNamespace"] = k8sNamespace stringMap["ContainerName"] = containerName dataItemADX = DataItemADX{ - TimeGenerated: stringMap["LogEntryTimeStamp"], + TimeGenerated: 
stringMap["TimeGenerated"], Computer: stringMap["Computer"], - ContainerID: stringMap["Id"], + ContainerId: stringMap["ContainerId"], ContainerName: stringMap["ContainerName"], PodName: stringMap["PodName"], PodNamespace: stringMap["PodNamespace"], - LogMessage: stringMap["LogEntry"], - LogSource: stringMap["LogEntrySource"], + LogMessage: stringMap["LogMessage"], + LogSource: stringMap["LogSource"], AzureResourceId: stringMap["AzureResourceId"], } //ADX dataItemsADX = append(dataItemsADX, dataItemADX) } else { - dataItem = DataItem{ - ID: stringMap["Id"], - LogEntry: stringMap["LogEntry"], - LogEntrySource: stringMap["LogEntrySource"], - LogEntryTimeStamp: stringMap["LogEntryTimeStamp"], - LogEntryTimeOfCommand: stringMap["TimeOfCommand"], - SourceSystem: stringMap["SourceSystem"], - Computer: stringMap["Computer"], - Image: stringMap["Image"], - Name: stringMap["Name"], + if (ContainerLogSchemaV2 == true) { + dataItemLAv2 = DataItemLAv2{ + TimeGenerated: stringMap["TimeGenerated"], + Computer: stringMap["Computer"], + ContainerId: stringMap["ContainerId"], + ContainerName: stringMap["ContainerName"], + PodName: stringMap["PodName"], + PodNamespace: stringMap["PodNamespace"], + LogMessage: stringMap["LogMessage"], + LogSource: stringMap["LogSource"], + } + //ODS-v2 schema + dataItemsLAv2 = append(dataItemsLAv2, dataItemLAv2) + name = stringMap["ContainerName"] + id = stringMap["ContainerId"] + } else { + dataItemLAv1 = DataItemLAv1{ + ID: stringMap["Id"], + LogEntry: stringMap["LogEntry"], + LogEntrySource: stringMap["LogEntrySource"], + LogEntryTimeStamp: stringMap["LogEntryTimeStamp"], + LogEntryTimeOfCommand: stringMap["TimeOfCommand"], + SourceSystem: stringMap["SourceSystem"], + Computer: stringMap["Computer"], + Image: stringMap["Image"], + Name: stringMap["Name"], + } + //ODS-v1 schema + dataItemsLAv1 = append(dataItemsLAv1, dataItemLAv1) + name = stringMap["Name"] + id = stringMap["Id"] } - //ODS - dataItems = append(dataItems, dataItem) } - if 
stringMap["LogEntryTimeStamp"] != "" { - loggedTime, e := time.Parse(time.RFC3339, stringMap["LogEntryTimeStamp"]) + if logEntryTimeStamp != "" { + loggedTime, e := time.Parse(time.RFC3339, logEntryTimeStamp) if e != nil { - message := fmt.Sprintf("Error while converting LogEntryTimeStamp for telemetry purposes: %s", e.Error()) + message := fmt.Sprintf("Error while converting logEntryTimeStamp for telemetry purposes: %s", e.Error()) Log(message) SendException(message) } else { ltncy := float64(start.Sub(loggedTime) / time.Millisecond) if ltncy >= maxLatency { maxLatency = ltncy - maxLatencyContainer = dataItem.Name + "=" + dataItem.ID + maxLatencyContainer = name + "=" + id } } } @@ -919,8 +990,12 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { if len(msgPackEntries) > 0 && ContainerLogsRouteV2 == true { //flush to mdsd + mdsdSourceName := MdsdContainerLogSourceName + if (ContainerLogSchemaV2 == true) { + mdsdSourceName = MdsdContainerLogV2SourceName + } fluentForward := MsgPackForward{ - Tag: MdsdSourceName, + Tag: mdsdSourceName, Entries: msgPackEntries, } @@ -967,7 +1042,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { elapsed = time.Since(start) if er != nil { - Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... error : %s", len(dataItems), elapsed, er.Error()) + Log("Error::mdsd::Failed to write to mdsd %d records after %s. Will retry ... error : %s", len(msgPackEntries), elapsed, er.Error()) if MdsdMsgpUnixSocketClient != nil { MdsdMsgpUnixSocketClient.Close() MdsdMsgpUnixSocketClient = nil @@ -1013,14 +1088,14 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } } - // Setup a maximum time for completion to be 15 Seconds. + // Setup a maximum time for completion to be 30 Seconds. 
ctx, cancel := context.WithTimeout(ParentContext, 30*time.Second) defer cancel() //ADXFlushMutex.Lock() //defer ADXFlushMutex.Unlock() //MultiJSON support is not there yet - if ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogv2Mapping", ingest.JSON), ingest.FileFormat(ingest.JSON)); ingestionErr != nil { + if ingestionErr := ADXIngestor.FromReader(ctx, r, ingest.IngestionMappingRef("ContainerLogV2Mapping", ingest.JSON), ingest.FileFormat(ingest.JSON)); ingestionErr != nil { Log("Error when streaming to ADX Ingestion: %s", ingestionErr.Error()) //ADXIngestor = nil //not required as per ADX team. Will keep it to indicate that we tried this approach @@ -1035,58 +1110,75 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords = len(dataItemsADX) Log("Success::ADX::Successfully wrote %d container log records to ADX in %s", numContainerLogRecords, elapsed) - } else { - //flush to ODS - if len(dataItems) > 0 { - logEntry := ContainerLogBlob{ - DataType: ContainerLogDataType, + } else { //ODS + var logEntry interface{} + recordType := "" + loglinesCount := 0 + //schema v2 + if (len(dataItemsLAv2) > 0 && ContainerLogSchemaV2 == true) { + logEntry = ContainerLogBlobLAv2{ + DataType: ContainerLogV2DataType, IPName: IPName, - DataItems: dataItems} - - marshalled, err := json.Marshal(logEntry) - if err != nil { - message := fmt.Sprintf("Error while Marshalling log Entry: %s", err.Error()) - Log(message) - SendException(message) - return output.FLB_OK + DataItems: dataItemsLAv2} + loglinesCount = len(dataItemsLAv2) + recordType = "ContainerLogV2" + } else { + //schema v1 + if len(dataItemsLAv1) > 0 { + logEntry = ContainerLogBlobLAv1{ + DataType: ContainerLogDataType, + IPName: IPName, + DataItems: dataItemsLAv1} + loglinesCount = len(dataItemsLAv1) + recordType = "ContainerLog" } + } - req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(marshalled)) - req.Header.Set("Content-Type", 
"application/json") - req.Header.Set("User-Agent", userAgent) - reqId := uuid.New().String() - req.Header.Set("X-Request-ID", reqId) - //expensive to do string len for every request, so use a flag - if ResourceCentric == true { - req.Header.Set("x-ms-AzureResourceId", ResourceID) - } + marshalled, err := json.Marshal(logEntry) + //Log("LogEntry::e %s", marshalled) + if err != nil { + message := fmt.Sprintf("Error while Marshalling log Entry: %s", err.Error()) + Log(message) + SendException(message) + return output.FLB_OK + } - resp, err := HTTPClient.Do(req) - elapsed = time.Since(start) + req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(marshalled)) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", userAgent) + reqId := uuid.New().String() + req.Header.Set("X-Request-ID", reqId) + //expensive to do string len for every request, so use a flag + if ResourceCentric == true { + req.Header.Set("x-ms-AzureResourceId", ResourceID) + } + + resp, err := HTTPClient.Do(req) + elapsed = time.Since(start) - if err != nil { - message := fmt.Sprintf("Error when sending request %s \n", err.Error()) - Log(message) - // Commenting this out for now. TODO - Add better telemetry for ods errors using aggregation - //SendException(message) - Log("Failed to flush %d records after %s", len(dataItems), elapsed) + if err != nil { + message := fmt.Sprintf("Error when sending request %s \n", err.Error()) + Log(message) + // Commenting this out for now. 
TODO - Add better telemetry for ods errors using aggregation + //SendException(message) + + Log("Failed to flush %d records after %s", loglinesCount, elapsed) - return output.FLB_RETRY - } + return output.FLB_RETRY + } - if resp == nil || resp.StatusCode != 200 { - if resp != nil { - Log("RequestId %s Status %s Status Code %d", reqId, resp.Status, resp.StatusCode) - } - return output.FLB_RETRY + if resp == nil || resp.StatusCode != 200 { + if resp != nil { + Log("RequestId %s Status %s Status Code %d", reqId, resp.Status, resp.StatusCode) } + return output.FLB_RETRY + } - defer resp.Body.Close() - numContainerLogRecords = len(dataItems) - Log("PostDataHelper::Info::Successfully flushed %d container log records to ODS in %s", numContainerLogRecords, elapsed) + defer resp.Body.Close() + numContainerLogRecords = loglinesCount + Log("PostDataHelper::Info::Successfully flushed %d %s records to ODS in %s", numContainerLogRecords, recordType, elapsed) } - } ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() @@ -1374,10 +1466,22 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { CreateADXClient() } + ContainerLogSchemaVersion := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOG_SCHEMA_VERSION"))) + Log("AZMON_CONTAINER_LOG_SCHEMA_VERSION:%s", ContainerLogSchemaVersion) + + ContainerLogSchemaV2 = false //default is v1 schema + + if strings.Compare(ContainerLogSchemaVersion, ContainerLogV2SchemaVersion) == 0 && ContainerLogsRouteADX != true { + ContainerLogSchemaV2 = true + Log("Container logs schema=%s", ContainerLogV2SchemaVersion) + fmt.Fprintf(os.Stdout, "Container logs schema=%s... 
\n", ContainerLogV2SchemaVersion) + } + if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { populateExcludedStdoutNamespaces() populateExcludedStderrNamespaces() - if enrichContainerLogs == true && ContainerLogsRouteADX != true { + //enrichment not applicable for ADX and v2 schema + if enrichContainerLogs == true && ContainerLogsRouteADX != true && ContainerLogSchemaV2 != true { Log("ContainerLogEnrichment=true; starting goroutine to update containerimagenamemaps \n") go updateContainerImageNameMaps() } else { diff --git a/source/plugins/go/src/utils.go b/source/plugins/go/src/utils.go index 91791ae1a..61d047e52 100644 --- a/source/plugins/go/src/utils.go +++ b/source/plugins/go/src/utils.go @@ -145,7 +145,7 @@ func CreateADXClient() { //log.Fatalf("Unable to create ADX connection %s", err.Error()) } else { Log("Successfully created ADX Client. Creating Ingestor...") - ingestor, ingestorErr := ingest.New(client, "containerinsights", "ContainerLogv2") + ingestor, ingestorErr := ingest.New(client, "containerinsights", "ContainerLogV2") if ingestorErr != nil { Log("Error::mdsd::Unable to create ADX ingestor %s", ingestorErr.Error()) } else { diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index 67bd61667..d3a96d37d 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -25,6 +25,7 @@ class CAdvisorMetricsAPIClient @clusterLogTailPath = ENV["AZMON_LOG_TAIL_PATH"] @clusterAgentSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] @clusterContainerLogEnrich = ENV["AZMON_CLUSTER_CONTAINER_LOG_ENRICH"] + @clusterContainerLogSchemaVersion = ENV["AZMON_CONTAINER_LOG_SCHEMA_VERSION"] @dsPromInterval = ENV["TELEMETRY_DS_PROM_INTERVAL"] @dsPromFieldPassCount = ENV["TELEMETRY_DS_PROM_FIELDPASS_LENGTH"] @@ -65,6 +66,7 @@ class CAdvisorMetricsAPIClient #cadvisor ports @@CADVISOR_SECURE_PORT = "10250" 
@@CADVISOR_NON_SECURE_PORT = "10255" + def initialize end @@ -85,40 +87,40 @@ def getPodsFromCAdvisor(winNode: nil) end def getBaseCAdvisorUri(winNode) - cAdvisorSecurePort = isCAdvisorOnSecurePort() + cAdvisorSecurePort = isCAdvisorOnSecurePort() + + if !!cAdvisorSecurePort == true + defaultHost = "https://localhost:#{@@CADVISOR_SECURE_PORT}" + else + defaultHost = "http://localhost:#{@@CADVISOR_NON_SECURE_PORT}" + end + + if !winNode.nil? + nodeIP = winNode["InternalIP"] + else + nodeIP = ENV["NODE_IP"] + end + if !nodeIP.nil? + @Log.info("Using #{nodeIP} for CAdvisor Host") if !!cAdvisorSecurePort == true - defaultHost = "https://localhost:#{@@CADVISOR_SECURE_PORT}" + return "https://#{nodeIP}:#{@@CADVISOR_SECURE_PORT}" else - defaultHost = "http://localhost:#{@@CADVISOR_NON_SECURE_PORT}" + return "http://#{nodeIP}:#{@@CADVISOR_NON_SECURE_PORT}" end - + else + @Log.warn ("NODE_IP environment variable not set. Using default as : #{defaultHost}") if !winNode.nil? - nodeIP = winNode["InternalIP"] + return nil else - nodeIP = ENV["NODE_IP"] - end - - if !nodeIP.nil? - @Log.info("Using #{nodeIP} for CAdvisor Host") - if !!cAdvisorSecurePort == true - return "https://#{nodeIP}:#{@@CADVISOR_SECURE_PORT}" - else - return "http://#{nodeIP}:#{@@CADVISOR_NON_SECURE_PORT}" - end - else - @Log.warn ("NODE_IP environment variable not set. Using default as : #{defaultHost}") - if !winNode.nil? 
- return nil - else - return defaultHost - end + return defaultHost end + end end def getCAdvisorUri(winNode, relativeUri) - baseUri = getBaseCAdvisorUri(winNode) - return baseUri + relativeUri + baseUri = getBaseCAdvisorUri(winNode) + return baseUri + relativeUri end def getMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) @@ -247,22 +249,26 @@ def getContainerCpuMetricItems(metricJSON, hostName, cpuMetricNameToCollect, met telemetryProps["dsPromFDC"] = @dsPromFieldDropCount telemetryProps["dsPromUrl"] = @dsPromUrlCount end - #telemetry about containerlogs Routing for daemonset + #telemetry about containerlog Routing for daemonset if File.exist?(Constants::AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2_FILENAME) telemetryProps["containerLogsRoute"] = "v2" elsif (!@containerLogsRoute.nil? && !@containerLogsRoute.empty?) telemetryProps["containerLogsRoute"] = @containerLogsRoute end - #telemetry about health model - if (!@hmEnabled.nil? && !@hmEnabled.empty?) + #telemetry about health model + if (!@hmEnabled.nil? && !@hmEnabled.empty?) telemetryProps["hmEnabled"] = @hmEnabled - end - #telemetry for npm integration - if (!@npmIntegrationAdvanced.nil? && !@npmIntegrationAdvanced.empty?) - telemetryProps["int-npm-a"] = "1" - elsif (!@npmIntegrationBasic.nil? && !@npmIntegrationBasic.empty?) - telemetryProps["int-npm-b"] = "1" - end + end + #telemetry for npm integration + if (!@npmIntegrationAdvanced.nil? && !@npmIntegrationAdvanced.empty?) + telemetryProps["int-npm-a"] = "1" + elsif (!@npmIntegrationBasic.nil? && !@npmIntegrationBasic.empty?) + telemetryProps["int-npm-b"] = "1" + end + #telemetry for Container log schema version clusterContainerLogSchemaVersion + if (!@clusterContainerLogSchemaVersion.nil? && !@clusterContainerLogSchemaVersion.empty?) 
+ telemetryProps["containerLogVer"] = @clusterContainerLogSchemaVersion + end ApplicationInsightsUtility.sendMetricTelemetry(metricNametoReturn, metricValue, telemetryProps) end end @@ -303,8 +309,8 @@ def getInsightsMetrics(winNode: nil, metricTime: Time.now.utc.iso8601) end if !metricInfo.nil? metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryTotal", "containerGpumemoryTotalBytes", metricTime)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed","containerGpumemoryUsedBytes", metricTime)) - metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle","containerGpuDutyCycle", metricTime)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "memoryUsed", "containerGpumemoryUsedBytes", metricTime)) + metricDataItems.concat(getContainerGpuMetricsAsInsightsMetrics(metricInfo, hostName, "dutyCycle", "containerGpuDutyCycle", metricTime)) metricDataItems.concat(getPersistentVolumeMetrics(metricInfo, hostName, "usedBytes", Constants::PV_USED_BYTES, metricTime)) else @@ -327,7 +333,6 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric begin metricInfo = metricJSON metricInfo["pods"].each do |pod| - podNamespace = pod["podRef"]["namespace"] excludeNamespace = false if (podNamespace.downcase == "kube-system") && @pvKubeSystemCollectionMetricsEnabled == "false" @@ -351,11 +356,11 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric metricItem["Computer"] = hostName metricItem["Name"] = metricNameToReturn metricItem["Value"] = volume[metricNameToCollect] - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN metricItem["Namespace"] = Constants::INSIGTHTSMETRICS_TAGS_PV_NAMESPACE - + metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID ] = clusterId + 
metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName metricTags[Constants::INSIGHTSMETRICS_TAGS_POD_UID] = podUid metricTags[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = podName @@ -365,7 +370,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric metricTags[Constants::INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES] = volume["capacityBytes"] metricItem["Tags"] = metricTags - + metricItems.push(metricItem) end end @@ -390,7 +395,6 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric return metricItems end - def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCollect, metricNametoReturn, metricPollTime) metricItems = [] clusterId = KubernetesApiClient.getClusterId @@ -410,18 +414,17 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo if (!accelerator[metricNameToCollect].nil?) #empty check is invalid for non-strings containerName = container["name"] metricValue = accelerator[metricNameToCollect] - metricItem = {} metricItem["CollectionTime"] = metricPollTime metricItem["Computer"] = hostName metricItem["Name"] = metricNametoReturn metricItem["Value"] = metricValue - metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN + metricItem["Origin"] = Constants::INSIGHTSMETRICS_TAGS_ORIGIN metricItem["Namespace"] = Constants::INSIGHTSMETRICS_TAGS_GPU_NAMESPACE - + metricTags = {} - metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID ] = clusterId + metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERID] = clusterId metricTags[Constants::INSIGHTSMETRICS_TAGS_CLUSTERNAME] = clusterName metricTags[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] = podUid + "/" + containerName #metricTags[Constants::INSIGHTSMETRICS_TAGS_K8SNAMESPACE] = podNameSpace @@ -437,9 +440,9 @@ def getContainerGpuMetricsAsInsightsMetrics(metricJSON, hostName, metricNameToCo if (!accelerator["id"].nil? 
&& !accelerator["id"].empty?) metricTags[Constants::INSIGHTSMETRICS_TAGS_GPU_ID] = accelerator["id"] end - + metricItem["Tags"] = metricTags - + metricItems.push(metricItem) end end @@ -916,13 +919,13 @@ def getResponse(winNode, relativeUri) uri = URI.parse(cAdvisorUri) if isCAdvisorOnSecurePort() Net::HTTP.start(uri.host, uri.port, - :use_ssl => true, :open_timeout => 20, :read_timeout => 40, - :ca_file => "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", - :verify_mode => OpenSSL::SSL::VERIFY_NONE) do |http| - cAdvisorApiRequest = Net::HTTP::Get.new(uri.request_uri) - cAdvisorApiRequest["Authorization"] = "Bearer #{bearerToken}" - response = http.request(cAdvisorApiRequest) - @Log.info "Got response code #{response.code} from #{uri.request_uri}" + :use_ssl => true, :open_timeout => 20, :read_timeout => 40, + :ca_file => "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", + :verify_mode => OpenSSL::SSL::VERIFY_NONE) do |http| + cAdvisorApiRequest = Net::HTTP::Get.new(uri.request_uri) + cAdvisorApiRequest["Authorization"] = "Bearer #{bearerToken}" + response = http.request(cAdvisorApiRequest) + @Log.info "Got response code #{response.code} from #{uri.request_uri}" end else Net::HTTP.start(uri.host, uri.port, :use_ssl => false, :open_timeout => 20, :read_timeout => 40) do |http| @@ -935,19 +938,24 @@ def getResponse(winNode, relativeUri) rescue => error @Log.warn("CAdvisor api request for #{cAdvisorUri} failed: #{error}") telemetryProps = {} - telemetryProps["Computer"] = winNode["Hostname"] + if !winNode.nil? + hostName = winNode["Hostname"] + else + hostName = (OMS::Common.get_hostname) + end + telemetryProps["Computer"] = hostName ApplicationInsightsUtility.sendExceptionTelemetry(error, telemetryProps) end return response end def isCAdvisorOnSecurePort - cAdvisorSecurePort = false - # Check to see whether omsagent needs to use 10255(insecure) port or 10250(secure) port - if !@cAdvisorMetricsSecurePort.nil? 
&& @cAdvisorMetricsSecurePort == "true" - cAdvisorSecurePort = true - end - return cAdvisorSecurePort + cAdvisorSecurePort = false + # Check to see whether omsagent needs to use 10255(insecure) port or 10250(secure) port + if !@cAdvisorMetricsSecurePort.nil? && @cAdvisorMetricsSecurePort == "true" + cAdvisorSecurePort = true + end + return cAdvisorSecurePort end end end diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index aca2142a0..c5a363741 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -791,7 +791,7 @@ def getKubeAPIServerUrl def getKubeServicesInventoryRecords(serviceList, batchTime = Time.utc.iso8601) kubeServiceRecords = [] begin - if (!serviceList.nil? && !serviceList.empty?) + if (!serviceList.nil? && !serviceList.empty? && serviceList.key?("items") && !serviceList["items"].nil? && !serviceList["items"].empty? ) servicesCount = serviceList["items"].length @Log.info("KubernetesApiClient::getKubeServicesInventoryRecords : number of services in serviceList #{servicesCount} @ #{Time.now.utc.iso8601}") serviceList["items"].each do |item| diff --git a/source/plugins/ruby/arc_k8s_cluster_identity.rb b/source/plugins/ruby/arc_k8s_cluster_identity.rb index ef55c3257..7824f3d4e 100644 --- a/source/plugins/ruby/arc_k8s_cluster_identity.rb +++ b/source/plugins/ruby/arc_k8s_cluster_identity.rb @@ -18,7 +18,7 @@ class ArcK8sClusterIdentity @@crd_resource_uri_template = "%{kube_api_server_url}/apis/%{cluster_config_crd_api_version}/namespaces/%{cluster_identity_resource_namespace}/azureclusteridentityrequests/%{cluster_identity_resource_name}" @@secret_resource_uri_template = "%{kube_api_server_url}/api/v1/namespaces/%{cluster_identity_token_secret_namespace}/secrets/%{token_secret_name}" @@azure_monitor_custom_metrics_audience = "https://monitoring.azure.com/" - @@cluster_identity_request_kind = "AzureClusterIdentityRequest" + @@cluster_identity_request_kind 
= "AzureClusterIdentityRequest" def initialize @LogPath = "/var/opt/microsoft/docker-cimprov/log/arc_k8s_cluster_identity.log" @@ -33,7 +33,9 @@ def initialize @log.warn "got api server url nil from KubernetesApiClient.getKubeAPIServerUrl @ #{Time.now.utc.iso8601}" end @http_client = get_http_client - @service_account_token = get_service_account_token + @service_account_token = get_service_account_token + @extensionName = ENV["ARC_K8S_EXTENSION_NAME"] + @log.info "extension name:#{@extensionName} @ #{Time.now.utc.iso8601}" @log.info "initialize complete @ #{Time.now.utc.iso8601}" end @@ -148,7 +150,7 @@ def renew_near_expiry_token() update_response = @http_client.request(update_request) @log.info "Got response of #{update_response.code} for PATCH #{crd_request_uri} @ #{Time.now.utc.iso8601}" if update_response.code.to_i == 404 - @log.info "since crd resource doesnt exist since creating crd resource : #{@@cluster_identity_resource_name} @ #{Time.now.utc.iso8601}" + @log.info "since crd resource doesnt exist hence creating crd resource : #{@@cluster_identity_resource_name} @ #{Time.now.utc.iso8601}" create_request = Net::HTTP::Post.new(crd_request_uri) create_request["Content-Type"] = "application/json" create_request["Authorization"] = "Bearer #{@service_account_token}" @@ -211,6 +213,9 @@ def get_crd_request_body body["metadata"]["namespace"] = @@cluster_identity_resource_namespace body["spec"] = {} body["spec"]["audience"] = @@azure_monitor_custom_metrics_audience + if !@extensionName.nil? && !@extensionName.empty? 
+ body["spec"]["resourceId"] = @extensionName + end return body end end diff --git a/source/plugins/ruby/in_kube_events.rb b/source/plugins/ruby/in_kube_events.rb index 4f6017cc5..f50019a01 100644 --- a/source/plugins/ruby/in_kube_events.rb +++ b/source/plugins/ruby/in_kube_events.rb @@ -129,6 +129,7 @@ def enumerate def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTime = Time.utc.iso8601) currentTime = Time.now emitTime = currentTime.to_f + @@istestvar = ENV["ISTEST"] begin eventStream = MultiEventStream.new events["items"].each do |items| @@ -171,6 +172,9 @@ def parse_and_emit_records(events, eventQueryState, newEventQueryState, batchTim @eventsCount += 1 end router.emit_stream(@tag, eventStream) if eventStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeEventsInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end rescue => errorStr $log.debug_backtrace(errorStr.backtrace) ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index e7c5060a5..c803c0fa2 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -188,6 +188,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream containerNodeInventoryEventStream = MultiEventStream.new + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("containerNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end # node metrics records @@ -217,6 +220,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream kubePerfEventStream = MultiEventStream.new + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeNodePerfEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end # node GPU metrics record @@ -249,6 +255,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{@NODES_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream insightsMetricsEventStream = MultiEventStream.new + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeNodeInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end # Adding telemetry to send node telemetry every 10 minutes timeDifference = (DateTime.now.to_time.to_i - @@nodeTelemetryTimeTracker).abs @@ -300,23 +309,35 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) router.emit_stream(@tag, eventStream) if eventStream $log.info("in_kube_node::parse_and_emit_records: number of mdm node inventory records emitted #{eventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@MDMKubeNodeInventoryTag, eventStream) if eventStream + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end eventStream = nil end if containerNodeInventoryEventStream.count > 0 $log.info("in_kube_node::parse_and_emit_records: number of container node inventory records emitted #{containerNodeInventoryEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@ContainerNodeInventoryTag, containerNodeInventoryEventStream) if containerNodeInventoryEventStream containerNodeInventoryEventStream = nil + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("containerNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end if kubePerfEventStream.count > 0 $log.info("in_kube_nodes::parse_and_emit_records: number of node perf metric records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream kubePerfEventStream = nil + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeNodePerfInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end if insightsMetricsEventStream.count > 0 $log.info("in_kube_nodes::parse_and_emit_records: number of GPU node perf metric records emitted #{insightsMetricsEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(Constants::INSIGHTSMETRICS_FLUENT_TAG, insightsMetricsEventStream) if insightsMetricsEventStream insightsMetricsEventStream = nil + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("kubeNodeInsightsMetricsEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end rescue => errorStr $log.warn "Failed to retrieve node inventory: #{errorStr}" @@ -447,7 +468,7 @@ def getNodeTelemetryProps(item) properties["Computer"] = item["metadata"]["name"] nodeInfo = item["status"]["nodeInfo"] properties["KubeletVersion"] = nodeInfo["kubeletVersion"] - properties["OperatingSystem"] = nodeInfo["osImage"] + properties["OperatingSystem"] = nodeInfo["operatingSystem"] properties["KernelVersion"] = nodeInfo["kernelVersion"] properties["OSImage"] = nodeInfo["osImage"] containerRuntimeVersion = nodeInfo["containerRuntimeVersion"] diff --git a/source/plugins/ruby/in_kube_podinventory.rb b/source/plugins/ruby/in_kube_podinventory.rb index 0cff2eefe..5256eb159 100644 --- a/source/plugins/ruby/in_kube_podinventory.rb +++ b/source/plugins/ruby/in_kube_podinventory.rb @@ -265,6 +265,9 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if @PODS_EMIT_STREAM_BATCH_SIZE > 0 && kubePerfEventStream.count >= @PODS_EMIT_STREAM_BATCH_SIZE $log.info("in_kube_podinventory::parse_and_emit_records: number of container perf records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end kubePerfEventStream = MultiEventStream.new end @@ -306,6 +309,9 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc $log.info("in_kube_podinventory::parse_and_emit_records: number of perf records emitted #{kubePerfEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeperfTag, kubePerfEventStream) if kubePerfEventStream kubePerfEventStream = nil + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp("true") == 0) + $log.info("kubeContainerPerfEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end if insightsMetricsEventStream.count > 0 @@ -345,6 +351,9 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc $log.info("in_kube_podinventory::parse_and_emit_records: number of service records emitted #{@PODS_EMIT_STREAM_BATCH_SIZE} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream kubeServicesEventStream = MultiEventStream.new + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeServicesEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end end end @@ -352,6 +361,9 @@ def parse_and_emit_records(podInventory, serviceRecords, continuationToken, batc if kubeServicesEventStream.count > 0 $log.info("in_kube_podinventory::parse_and_emit_records : number of service records emitted #{kubeServicesEventStream.count} @ #{Time.now.utc.iso8601}") router.emit_stream(@@kubeservicesTag, kubeServicesEventStream) if kubeServicesEventStream + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubeServicesEventEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end end kubeServicesEventStream = nil end diff --git a/source/plugins/ruby/in_kube_pvinventory.rb b/source/plugins/ruby/in_kube_pvinventory.rb index 861b3a8e1..4efe86f61 100644 --- a/source/plugins/ruby/in_kube_pvinventory.rb +++ b/source/plugins/ruby/in_kube_pvinventory.rb @@ -106,7 +106,7 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) currentTime = Time.now emitTime = currentTime.to_f eventStream = MultiEventStream.new - + @@istestvar = ENV["ISTEST"] begin records = [] pvInventory["items"].each do |item| @@ -156,6 +156,9 @@ def parse_and_emit_records(pvInventory, batchTime = Time.utc.iso8601) end router.emit_stream(@tag, eventStream) if eventStream + if (!@@istestvar.nil? 
&& !@@istestvar.empty? && @@istestvar.casecmp("true") == 0) + $log.info("kubePVInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") + end rescue => errorStr $log.warn "Failed in parse_and_emit_record for in_kube_pvinventory: #{errorStr}" diff --git a/source/plugins/ruby/kubelet_utils.rb b/source/plugins/ruby/kubelet_utils.rb index 599640d8f..e2c731b79 100644 --- a/source/plugins/ruby/kubelet_utils.rb +++ b/source/plugins/ruby/kubelet_utils.rb @@ -21,9 +21,11 @@ def get_node_capacity response = CAdvisorMetricsAPIClient.getAllMetricsCAdvisor(winNode: nil) if !response.nil? && !response.body.nil? all_metrics = response.body.split("\n") - cpu_capacity = all_metrics.select{|m| m.start_with?('machine_cpu_cores') && m.split.first.strip == 'machine_cpu_cores' }.first.split.last.to_f * 1000 + #cadvisor machine metrics can exist with (>=1.19) or without dimensions (<1.19) + #so just checking startswith of metric name would be good enough to pick the metric value from exposition format + cpu_capacity = all_metrics.select { |m| m.start_with?("machine_cpu_cores") }.first.split.last.to_f * 1000 @log.info "CPU Capacity #{cpu_capacity}" - memory_capacity_e = all_metrics.select{|m| m.start_with?('machine_memory_bytes') && m.split.first.strip == 'machine_memory_bytes' }.first.split.last + memory_capacity_e = all_metrics.select { |m| m.start_with?("machine_memory_bytes") }.first.split.last memory_capacity = BigDecimal(memory_capacity_e).to_f @log.info "Memory Capacity #{memory_capacity}" return [cpu_capacity, memory_capacity] @@ -87,9 +89,9 @@ def get_all_container_limits @log.info "cpuLimit: #{cpuLimit}" @log.info "memoryLimit: #{memoryLimit}" # Get cpu limit in nanocores - containerCpuLimitHash[key] = !cpuLimit.nil? ? KubernetesApiClient.getMetricNumericValue("cpu", cpuLimit) : 0 + containerCpuLimitHash[key] = !cpuLimit.nil? ? KubernetesApiClient.getMetricNumericValue("cpu", cpuLimit) : nil # Get memory limit in bytes - containerMemoryLimitHash[key] = !memoryLimit.nil? ? 
KubernetesApiClient.getMetricNumericValue("memory", memoryLimit) : 0 + containerMemoryLimitHash[key] = !memoryLimit.nil? ? KubernetesApiClient.getMetricNumericValue("memory", memoryLimit) : nil end end end diff --git a/source/plugins/ruby/kubernetes_container_inventory.rb b/source/plugins/ruby/kubernetes_container_inventory.rb index 69beca493..82e36c8cc 100644 --- a/source/plugins/ruby/kubernetes_container_inventory.rb +++ b/source/plugins/ruby/kubernetes_container_inventory.rb @@ -50,30 +50,7 @@ def getContainerInventoryRecords(podItem, batchTime, clusterCollectEnvironmentVa if !atLocation.nil? containerInventoryRecord["ImageId"] = imageIdValue[(atLocation + 1)..-1] end - end - # image is of the format - repository/image:imagetag - imageValue = containerStatus["image"] - if !imageValue.nil? && !imageValue.empty? - # Find delimiters in the string of format repository/image:imagetag - slashLocation = imageValue.index("/") - colonLocation = imageValue.index(":") - if !colonLocation.nil? - if slashLocation.nil? - # image:imagetag - containerInventoryRecord["Image"] = imageValue[0..(colonLocation - 1)] - else - # repository/image:imagetag - containerInventoryRecord["Repository"] = imageValue[0..(slashLocation - 1)] - containerInventoryRecord["Image"] = imageValue[(slashLocation + 1)..(colonLocation - 1)] - end - containerInventoryRecord["ImageTag"] = imageValue[(colonLocation + 1)..-1] - end - elsif !imageIdValue.nil? && !imageIdValue.empty? - # Getting repo information from imageIdValue when no tag in ImageId - if !atLocation.nil? 
- containerInventoryRecord["Repository"] = imageIdValue[0..(atLocation - 1)] - end - end + end containerInventoryRecord["ExitCode"] = 0 isContainerTerminated = false isContainerWaiting = false @@ -107,6 +84,51 @@ def getContainerInventoryRecords(podItem, batchTime, clusterCollectEnvironmentVa end containerInfoMap = containersInfoMap[containerName] + # image can be in any one of below format in spec + # repository/image[:imagetag | @digest], repository/image:imagetag@digest, repo/image, image:imagetag, image@digest, image + imageValue = containerInfoMap["image"] + if !imageValue.nil? && !imageValue.empty? + # Find delimiters in image format + atLocation = imageValue.index("@") + isDigestSpecified = false + if !atLocation.nil? + # repository/image@digest or repository/image:imagetag@digest, image@digest + imageValue = imageValue[0..(atLocation - 1)] + # Use Digest from the spec's image in case when the status doesnt get populated i.e. container in pending or image pull back etc. + if containerInventoryRecord["ImageId"].nil? || containerInventoryRecord["ImageId"].empty? + containerInventoryRecord["ImageId"] = imageValue[(atLocation + 1)..-1] + end + isDigestSpecified = true + end + slashLocation = imageValue.index("/") + colonLocation = imageValue.index(":") + if !colonLocation.nil? + if slashLocation.nil? + # image:imagetag + containerInventoryRecord["Image"] = imageValue[0..(colonLocation - 1)] + else + # repository/image:imagetag + containerInventoryRecord["Repository"] = imageValue[0..(slashLocation - 1)] + containerInventoryRecord["Image"] = imageValue[(slashLocation + 1)..(colonLocation - 1)] + end + containerInventoryRecord["ImageTag"] = imageValue[(colonLocation + 1)..-1] + else + if slashLocation.nil? 
+ # image + containerInventoryRecord["Image"] = imageValue + else + # repo/image + containerInventoryRecord["Repository"] = imageValue[0..(slashLocation - 1)] + containerInventoryRecord["Image"] = imageValue[(slashLocation + 1)..-1] + end + # if no tag specified, k8s assumes latest as imagetag and this is same behavior from docker API and from status. + # Ref - https://kubernetes.io/docs/concepts/containers/images/#image-names + if isDigestSpecified == false + containerInventoryRecord["ImageTag"] = "latest" + end + end + end + podName = containerInfoMap["PodName"] namespace = containerInfoMap["Namespace"] # containername in the format what docker sees @@ -165,6 +187,7 @@ def getContainersInfoMap(podItem, isWindows) podContainers.each do |container| containerInfoMap = {} containerName = container["name"] + containerInfoMap["image"] = container["image"] containerInfoMap["ElementName"] = containerName containerInfoMap["Computer"] = nodeName containerInfoMap["PodName"] = podName diff --git a/test/e2e/e2e-tests.yaml b/test/e2e/e2e-tests.yaml new file mode 100644 index 000000000..06dfa1fb0 --- /dev/null +++ b/test/e2e/e2e-tests.yaml @@ -0,0 +1,178 @@ + +--- +apiVersion: v1 +kind: Namespace +metadata: + name: sonobuoy +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + component: sonobuoy + name: sonobuoy-serviceaccount + namespace: sonobuoy +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + component: sonobuoy + namespace: sonobuoy + name: sonobuoy-serviceaccount-sonobuoy +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: sonobuoy-serviceaccount-sonobuoy +subjects: +- kind: ServiceAccount + name: sonobuoy-serviceaccount + namespace: sonobuoy +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + component: sonobuoy + namespace: sonobuoy + name: sonobuoy-serviceaccount-sonobuoy +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' +- 
nonResourceURLs: + - '/metrics' + - '/logs' + - '/logs/*' + verbs: + - 'get' +--- +apiVersion: v1 +data: + config.json: | + {"Description":"DEFAULT","UUID":"bf5c02ed-1948-48f1-b12d-5a2d74435e46","Version":"v0.20.0","ResultsDir":"/tmp/sonobuoy","Resources":["apiservices","certificatesigningrequests","clusterrolebindings","clusterroles","componentstatuses","configmaps","controllerrevisions","cronjobs","customresourcedefinitions","daemonsets","deployments","endpoints","ingresses","jobs","leases","limitranges","mutatingwebhookconfigurations","namespaces","networkpolicies","nodes","persistentvolumeclaims","persistentvolumes","poddisruptionbudgets","pods","podlogs","podsecuritypolicies","podtemplates","priorityclasses","replicasets","replicationcontrollers","resourcequotas","rolebindings","roles","servergroups","serverversion","serviceaccounts","services","statefulsets","storageclasses","validatingwebhookconfigurations","volumeattachments"],"Filters":{"Namespaces":".*","LabelSelector":""},"Limits":{"PodLogs":{"Namespaces":"","SonobuoyNamespace":true,"FieldSelectors":[],"LabelSelector":"","Previous":false,"SinceSeconds":null,"SinceTime":null,"Timestamps":false,"TailLines":null,"LimitBytes":null,"LimitSize":"","LimitTime":""}},"QPS":30,"Burst":50,"Server":{"bindaddress":"0.0.0.0","bindport":8080,"advertiseaddress":"","timeoutseconds":10800},"Plugins":null,"PluginSearchPath":["./plugins.d","/etc/sonobuoy/plugins.d","~/sonobuoy/plugins.d"],"Namespace":"sonobuoy","WorkerImage":"sonobuoy/sonobuoy:v0.20.0","ImagePullPolicy":"IfNotPresent","ImagePullSecrets":"","ProgressUpdatesPort":"8099"} +kind: ConfigMap +metadata: + labels: + component: sonobuoy + name: sonobuoy-config-cm + namespace: sonobuoy +--- +apiVersion: v1 +data: + plugin-0.yaml: | + podSpec: + containers: [] + restartPolicy: Never + serviceAccountName: sonobuoy-serviceaccount + nodeSelector: + kubernetes.io/os: linux + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - 
key: CriticalAddonsOnly + operator: Exists + - key: kubernetes.io/e2e-evict-taint-key + operator: Exists + sonobuoy-config: + driver: Job + plugin-name: agenttests + result-format: junit + spec: + env: + # Update values of CLIENT_ID, CLIENT_SECRET of the service principal which has permission to query LA ad Metrics API + # Update value of TENANT_ID corresponding your Azure Service principal + - name: CLIENT_ID + value: "SP_CLIENT_ID_VALUE" + - name: CLIENT_SECRET + value: "CLIENT_SECRET_VALUE" + - name: TENANT_ID + value: "SP_TENANT_ID_VALUE" + - name: DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES + value: "10" + - name: DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES + value: "10" + - name: AGENT_POD_EXPECTED_RESTART_COUNT + value: "0" + - name: AZURE_CLOUD + value: "AZURE_PUBLIC_CLOUD" + # image tag should be updated if new tests being added after this image + image: mcr.microsoft.com/azuremonitor/containerinsights/cidev:ciagenttest02152021 + imagePullPolicy: IfNotPresent + name: plugin + resources: {} + volumeMounts: + - mountPath: /tmp/results + name: results +kind: ConfigMap +metadata: + labels: + component: sonobuoy + name: sonobuoy-plugins-cm + namespace: sonobuoy +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + component: sonobuoy + run: sonobuoy-master + sonobuoy-component: aggregator + tier: analysis + name: sonobuoy + namespace: sonobuoy +spec: + containers: + - env: + - name: SONOBUOY_ADVERTISE_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: sonobuoy/sonobuoy:v0.20.0 + imagePullPolicy: IfNotPresent + name: kube-sonobuoy + volumeMounts: + - mountPath: /etc/sonobuoy + name: sonobuoy-config-volume + - mountPath: /plugins.d + name: sonobuoy-plugins-volume + - mountPath: /tmp/sonobuoy + name: output-volume + restartPolicy: Never + serviceAccountName: sonobuoy-serviceaccount + nodeSelector: + kubernetes.io/os: linux + tolerations: + - key: "kubernetes.io/e2e-evict-taint-key" + operator: "Exists" + volumes: + - configMap: + name: 
sonobuoy-config-cm + name: sonobuoy-config-volume + - configMap: + name: sonobuoy-plugins-cm + name: sonobuoy-plugins-volume + - emptyDir: {} + name: output-volume +--- +apiVersion: v1 +kind: Service +metadata: + labels: + component: sonobuoy + sonobuoy-component: aggregator + name: sonobuoy-aggregator + namespace: sonobuoy +spec: + ports: + - port: 8080 + protocol: TCP + targetPort: 8080 + selector: + sonobuoy-component: aggregator + type: ClusterIP + diff --git a/test/e2e/src/common/arm_rest_utility.py b/test/e2e/src/common/arm_rest_utility.py new file mode 100644 index 000000000..604f8b791 --- /dev/null +++ b/test/e2e/src/common/arm_rest_utility.py @@ -0,0 +1,25 @@ +import adal +import pytest + +from msrestazure.azure_active_directory import AADTokenCredentials + + +# Function to fetch aad token from spn id and password +def fetch_aad_token(client_id, client_secret, authority_uri, resource_uri): + """ + Authenticate using service principal w/ key. + """ + try: + context = adal.AuthenticationContext(authority_uri, api_version=None) + return context.acquire_token_with_client_credentials(resource_uri, client_id, client_secret) + except Exception as e: + pytest.fail("Error occured while fetching aad token: " + str(e)) + + +# Function that returns aad token credentials for a given spn +def fetch_aad_token_credentials(client_id, client_secret, authority_uri, resource_uri): + mgmt_token = fetch_aad_token(client_id, client_secret, authority_uri, resource_uri) + try: + return AADTokenCredentials(mgmt_token, client_id) + except Exception as e: + pytest.fail("Error occured while fetching credentials: " + str(e)) diff --git a/test/e2e/src/common/constants.py b/test/e2e/src/common/constants.py new file mode 100644 index 000000000..770964cb5 --- /dev/null +++ b/test/e2e/src/common/constants.py @@ -0,0 +1,119 @@ +AZURE_PUBLIC_CLOUD_ENDPOINTS = { + "activeDirectory": "https://login.microsoftonline.com/", + "activeDirectoryDataLakeResourceId": "https://datalake.azure.net/", + 
"activeDirectoryGraphResourceId": "https://graph.windows.net/", + "activeDirectoryResourceId": "https://management.core.windows.net/", + "appInsights": "https://api.applicationinsights.io", + "appInsightsTelemetryChannel": "https://dc.applicationinsights.azure.com/v2/track", + "batchResourceId": "https://batch.core.windows.net/", + "gallery": "https://gallery.azure.com/", + "logAnalytics": "https://api.loganalytics.io", + "management": "https://management.core.windows.net/", + "mediaResourceId": "https://rest.media.azure.net", + "microsoftGraphResourceId": "https://graph.microsoft.com/", + "ossrdbmsResourceId": "https://ossrdbms-aad.database.windows.net", + "resourceManager": "https://management.azure.com/", + "sqlManagement": "https://management.core.windows.net:8443/", + "vmImageAliasDoc": "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/master/arm-compute/quickstart-templates/aliases.json" +} + +AZURE_DOGFOOD_ENDPOINTS = { + "activeDirectory": "https://login.windows-ppe.net/", + "activeDirectoryDataLakeResourceId": None, + "activeDirectoryGraphResourceId": "https://graph.ppe.windows.net/", + "activeDirectoryResourceId": "https://management.core.windows.net/", + "appInsights": None, + "appInsightsTelemetryChannel": None, + "batchResourceId": None, + "gallery": "https://df.gallery.azure-test.net/", + "logAnalytics": None, + "management": "https://management-preview.core.windows-int.net/", + "mediaResourceId": None, + "microsoftGraphResourceId": None, + "ossrdbmsResourceId": None, + "resourceManager": "https://api-dogfood.resources.windows-int.net/", + "sqlManagement": None, + "vmImageAliasDoc": None +} + +AZURE_CLOUD_DICT = {"AZURE_PUBLIC_CLOUD" : AZURE_PUBLIC_CLOUD_ENDPOINTS, "AZURE_DOGFOOD": AZURE_DOGFOOD_ENDPOINTS} + +TIMEOUT = 300 + +# Azure Monitor for Container Extension related +AGENT_RESOURCES_NAMESPACE = 'kube-system' +AGENT_DEPLOYMENT_NAME = 'omsagent-rs' +AGENT_DAEMONSET_NAME = 'omsagent' +AGENT_WIN_DAEMONSET_NAME = 'omsagent-win' + 
+AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR = 'rsName=omsagent-rs'
+AGENT_DAEMON_SET_PODS_LABEL_SELECTOR = 'component=oms-agent'
+AGENT_OMSAGENT_LOG_PATH = '/var/opt/microsoft/omsagent/log/omsagent.log'
+AGENT_REPLICASET_WORKFLOWS = ["kubePodInventoryEmitStreamSuccess", "kubeNodeInventoryEmitStreamSuccess"]
+
+# override this through setting environment variable if the expected restart count is > 0 for example applying configmap
+AGENT_POD_EXPECTED_RESTART_COUNT = 0
+
+# replicaset workflow streams
+KUBE_POD_INVENTORY_EMIT_STREAM = "kubePodInventoryEmitStreamSuccess"
+KUBE_NODE_INVENTORY_EMIT_STREAM = "kubeNodeInventoryEmitStreamSuccess"
+KUBE_DEPLOYMENT_INVENTORY_EMIT_STREAM = "kubestatedeploymentsInsightsMetricsEmitStreamSuccess"
+KUBE_CONTAINER_PERF_EMIT_STREAM = "kubeContainerPerfEventEmitStreamSuccess"
+KUBE_SERVICES_EMIT_STREAM = "kubeServicesEventEmitStreamSuccess"
+KUBE_CONTAINER_NODE_INVENTORY_EMIT_STREAM = "containerNodeInventoryEmitStreamSuccess"
+KUBE_EVENTS_EMIT_STREAM = "kubeEventsInventoryEmitStreamSuccess"
+# daemonset workflow streams
+CONTAINER_PERF_EMIT_STREAM = "cAdvisorPerfEmitStreamSuccess"
+CONTAINER_INVENTORY_EMIT_STREAM = "containerInventoryEmitStreamSuccess"
+
+# simple log analytics queries to validate for e2e workflows
+DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES = 10
+KUBE_POD_INVENTORY_QUERY = "KubePodInventory | where TimeGenerated > ago({0}) | count"
+KUBE_NODE_INVENTORY_QUERY = "KubeNodeInventory | where TimeGenerated > ago({0}) | count"
+KUBE_SERVICES_QUERY = "KubeServices | where TimeGenerated > ago({0}) | count"
+KUBE_EVENTS_QUERY = "KubeEvents | where TimeGenerated > ago({0}) | count"
+CONTAINER_NODE_INVENTORY_QUERY = "ContainerNodeInventory | where TimeGenerated > ago({0}) | count"
+CONTAINER_INVENTORY_QUERY = "ContainerInventory | where TimeGenerated > ago({0}) | count"
+# node perf
+NODE_PERF_CPU_CAPCITY_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'cpuCapacityNanoCores' | where TimeGenerated > ago({0}) | count" 
+NODE_PERF_MEMORY_CAPCITY_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'memoryCapacityBytes' | where TimeGenerated > ago({0}) | count" +NODE_PERF_CPU_ALLOCATABLE_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'cpuAllocatableNanoCores' | where TimeGenerated > ago({0}) | count" +NODE_PERF_MEMORY_ALLOCATABLE_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'memoryAllocatableBytes' | where TimeGenerated > ago({0}) | count" +NODE_PERF_CPU_USAGE_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'cpuUsageNanoCores' | where TimeGenerated > ago({0}) | count" +NODE_PERF_MEMORY_RSS_USAGE_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'memoryRssBytes' | where TimeGenerated > ago({0}) | count" +NODE_PERF_MEMORY_WS_USAGE_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName =='memoryWorkingSetBytes' | where TimeGenerated > ago({0}) | count" +NODE_PERF_RESTART_TIME_EPOCH_QUERY = "Perf | where ObjectName == 'K8SNode' | where CounterName == 'restartTimeEpoch' | where TimeGenerated > ago({0}) | count" +# container perf +CONTAINER_PERF_CPU_LIMITS_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'cpuLimitNanoCores' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_MEMORY_LIMITS_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'memoryLimitBytes' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_CPU_REQUESTS_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'cpuRequestNanoCores' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_MEMORY_REQUESTS_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'memoryRequestBytes' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_CPU_USAGE_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'cpuUsageNanoCores' | where TimeGenerated > ago({0}) | count" 
+CONTAINER_PERF_MEMORY_RSS_USAGE_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'memoryRssBytes' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_MEMORY_WS_USAGE_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'memoryWorkingSetBytes' | where TimeGenerated > ago({0}) | count" +CONTAINER_PERF_RESTART_TIME_EPOCH_QUERY = "Perf | where ObjectName == 'K8SContainer' | where CounterName == 'restartTimeEpoch' | where TimeGenerated > ago({0}) | count" +# container log +CONTAINER_LOG_QUERY = "ContainerLog | where TimeGenerated > ago({0}) | count" +# insights metrics +INSIGHTS_METRICS_QUERY = "InsightsMetrics | where TimeGenerated > ago({0}) | count" + +# custom metrics +METRICS_API_VERSION = '2019-07-01' +DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES = 10 + +# node metrics +NODE_METRICS_NAMESPACE = 'insights.container/nodes' +NODE_METRIC_METRIC_AGGREGATION = 'average' +NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME = 'cpuUsageMilliCores' +NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME = 'cpuUsagePercentage' +NODE_MEMORY_RSS_METRIC_NAME = 'memoryRssBytes' +NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME = 'memoryRssPercentage' +NODE_MEMORY_WS_METRIC_NAME = 'memoryWorkingSetBytes' +NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME = 'memoryWorkingSetPercentage' +NODE_COUNT_METRIC_NAME = 'nodesCount' +NODE_DISK_USAGE_PERCENTAGE_METRIC_NAME = 'diskUsedPercentage(Preview)' + +# pod metrics +POD_METRICS_NAMESPACE = 'insights.container/pods' +POD_METRIC_METRIC_AGGREGATION = 'average' +POD_COUNT_METRIC_NAME = 'PodCount' diff --git a/test/e2e/src/common/helm_utility.py b/test/e2e/src/common/helm_utility.py new file mode 100644 index 000000000..6eac1e071 --- /dev/null +++ b/test/e2e/src/common/helm_utility.py @@ -0,0 +1,68 @@ +import os +import pytest +import subprocess + + +# Function to pull helm charts +def pull_helm_chart(registry_path): + os.environ['HELM_EXPERIMENTAL_OCI'] = '1' + cmd_helm_chart_pull = ["helm", "chart", "pull", registry_path] + 
response_helm_chart_pull = subprocess.Popen(cmd_helm_chart_pull, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_chart_pull, error_helm_chart_pull = response_helm_chart_pull.communicate() + if response_helm_chart_pull.returncode != 0: + pytest.fail("Unable to pull helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_pull.decode("ascii")) + return output_helm_chart_pull.decode("ascii") + + +# Function to export helm charts +def export_helm_chart(registry_path, destination): + cmd_helm_chart_export = ["helm", "chart", "export", registry_path, "--destination", destination] + response_helm_chart_export = subprocess.Popen(cmd_helm_chart_export, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_chart_export, error_helm_chart_export = response_helm_chart_export.communicate() + if response_helm_chart_export.returncode != 0: + pytest.fail("Unable to export helm chart from the registry '{}': ".format(registry_path) + error_helm_chart_export.decode("ascii")) + return output_helm_chart_export.decode("ascii") + + +# Function to add a helm repository +def add_helm_repo(repo_name, repo_url): + cmd_helm_repo = ["helm", "repo", "add", repo_name, repo_url] + response_helm_repo = subprocess.Popen(cmd_helm_repo, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_repo, error_helm_repo = response_helm_repo.communicate() + if response_helm_repo.returncode != 0: + pytest.fail("Unable to add repository {} to helm: ".format(repo_url) + error_helm_repo.decode("ascii")) + return output_helm_repo.decode("ascii") + + +# Function to install helm charts +def install_helm_chart(helm_release_name, helm_release_namespace, helm_chart_path, wait=False, **kwargs): + cmd_helm_install = ["helm", "install", helm_release_name, helm_chart_path, "--namespace", helm_release_namespace] + if wait: + cmd_helm_install.extend(["--wait"]) + for key, value in kwargs.items(): + cmd_helm_install.extend(["--set", "{}={}".format(key, value)]) + 
response_helm_install = subprocess.Popen(cmd_helm_install, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_install, error_helm_install = response_helm_install.communicate() + if response_helm_install.returncode != 0: + pytest.fail("Unable to install helm release: " + error_helm_install.decode("ascii")) + return output_helm_install.decode("ascii") + + +# Function to delete helm chart +def delete_helm_release(helm_release_name, helm_release_namespace): + cmd_helm_delete = ["helm", "delete", helm_release_name, "--namespace", helm_release_namespace] + response_helm_delete = subprocess.Popen(cmd_helm_delete, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_delete, error_helm_delete = response_helm_delete.communicate() + if response_helm_delete.returncode != 0: + pytest.fail("Error occured while deleting the helm release: " + error_helm_delete.decode("ascii")) + return output_helm_delete.decode("ascii") + + +# Function to list helm release +def list_helm_release(helm_release_namespace): + cmd_helm_list = ["helm", "list", "--namespace", helm_release_namespace] + response_helm_list = subprocess.Popen(cmd_helm_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output_helm_list, error_helm_list = response_helm_list.communicate() + if response_helm_list.returncode != 0: + pytest.fail("Error occured while fetching the helm release: " + error_helm_list.decode("ascii")) + return output_helm_list.decode("ascii") diff --git a/test/e2e/src/common/kubernetes_configmap_utility.py b/test/e2e/src/common/kubernetes_configmap_utility.py new file mode 100644 index 000000000..caee9628e --- /dev/null +++ b/test/e2e/src/common/kubernetes_configmap_utility.py @@ -0,0 +1,8 @@ +import pytest + + +def get_namespaced_configmap(api_instance, namespace, configmap_name): + try: + return api_instance.read_namespaced_config_map(configmap_name, namespace) + except Exception as e: + pytest.fail("Error occured when retrieving configmap: " + str(e)) diff --git 
a/test/e2e/src/common/kubernetes_crd_utility.py b/test/e2e/src/common/kubernetes_crd_utility.py new file mode 100644 index 000000000..f84092878 --- /dev/null +++ b/test/e2e/src/common/kubernetes_crd_utility.py @@ -0,0 +1,27 @@ +import pytest + +from kubernetes import watch + + +# Function to get the CRD instance +def get_crd_instance(api_instance, group, version, namespace, plural, crd_name): + try: + return api_instance.get_namespaced_custom_object(group, version, namespace, plural, crd_name) + except Exception as e: + pytest.fail("Error occurred when retrieving crd information: " + str(e)) + + +# Function that watches events corresponding to given CRD instance and passes the events to a callback function +def watch_crd_instance(api_instance, group, version, namespace, plural, crd_name, timeout, callback=None): + if not callback: + pytest.fail("callback should be specified") + + field_selector = "metadata.name={}".format(crd_name) if crd_name else "" + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespaced_custom_object, group, version, namespace, plural, field_selector=field_selector, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + pytest.fail("Error occurred when watching crd instance events: " + str(e)) + pytest.fail("The watch on the crd instance events has timed out.") diff --git a/test/e2e/src/common/kubernetes_daemonset_utility.py b/test/e2e/src/common/kubernetes_daemonset_utility.py new file mode 100644 index 000000000..dd76a11d9 --- /dev/null +++ b/test/e2e/src/common/kubernetes_daemonset_utility.py @@ -0,0 +1,36 @@ +import pytest +from kubernetes import watch + +# Returns a list of daemon_sets in a given namespace +def list_daemon_set(api_instance, namespace, field_selector="", label_selector=""): + try: + return api_instance.list_namespaced_daemon_set(namespace, field_selector=field_selector, label_selector=label_selector) + except Exception as e: + pytest.fail("Error occured when retrieving 
daemon_sets: " + str(e)) + +# Deletes a daemon_set +def delete_daemon_set(api_instance, namespace, daemon_set_name): + try: + return api_instance.delete_namespaced_daemon_set(daemon_set_name, namespace) + except Exception as e: + pytest.fail("Error occured when deleting daemon_set: " + str(e)) + +# Read a daemon_set +def read_daemon_set(api_instance, namespace, daemon_set_name): + try: + return api_instance.read_namespaced_daemon_set(daemon_set_name, namespace) + except Exception as e: + pytest.fail("Error occured when reading daemon_set: " + str(e)) + +# Function that watches events corresponding to daemon_sets in the given namespace and passes the events to a callback function +def watch_daemon_set_status(api_instance, namespace, timeout, callback=None): + if not callback: + return + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespaced_daemon_set, namespace, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + print("Error occurred when checking daemon_set status: " + str(e)) + print("The watch on the daemon_set status has timed out. 
Please see the pod logs for more info.") diff --git a/test/e2e/src/common/kubernetes_deployment_utility.py b/test/e2e/src/common/kubernetes_deployment_utility.py new file mode 100644 index 000000000..1be7a6b71 --- /dev/null +++ b/test/e2e/src/common/kubernetes_deployment_utility.py @@ -0,0 +1,38 @@ +import pytest +from kubernetes import watch + +# Returns a list of deployments in a given namespace +def list_deployment(api_instance, namespace, field_selector="", label_selector=""): + try: + return api_instance.list_namespaced_deployment(namespace, field_selector=field_selector, label_selector=label_selector) + except Exception as e: + pytest.fail("Error occured when retrieving deployments: " + str(e)) + +# Deletes a deployment +def delete_deployment(api_instance, namespace, deployment_name): + try: + return api_instance.delete_namespaced_deployment(deployment_name, namespace) + except Exception as e: + pytest.fail("Error occured when deleting deployment: " + str(e)) + + +# Read a deployment +def read_deployment(api_instance, namespace, deployment_name): + try: + return api_instance.read_namespaced_deployment(deployment_name, namespace) + except Exception as e: + pytest.fail("Error occured when reading deployment: " + str(e)) + +# Function that watches events corresponding to deployments in the given namespace and passes the events to a callback function +def watch_deployment_status(api_instance, namespace, timeout, callback=None): + if not callback: + return + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespaced_deployment, namespace, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + print("Error occurred when checking deployment status: " + str(e)) + print("The watch on the deployment status has timed out. 
Please see the pod logs for more info.") + \ No newline at end of file diff --git a/test/e2e/src/common/kubernetes_namespace_utility.py b/test/e2e/src/common/kubernetes_namespace_utility.py new file mode 100644 index 000000000..cea5788c5 --- /dev/null +++ b/test/e2e/src/common/kubernetes_namespace_utility.py @@ -0,0 +1,32 @@ +import pytest +from kubernetes import watch + + +# Function that watches events corresponding to kubernetes namespaces and passes the events to a callback function +def watch_namespace(api_instance, timeout, callback=None): + if not callback: + return + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespace, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + pytest.fail("Error occurred when checking namespace status: " + str(e)) + pytest.fail("The watch on the namespaces has timed out.") + + +# Function to list all kubernetes namespaces +def list_namespace(api_instance): + try: + return api_instance.list_namespace() + except Exception as e: + pytest.fail("Error occured when retrieving namespaces: " + str(e)) + + +# Function to delete a kubernetes namespaces +def delete_namespace(api_instance, namespace_name): + try: + return api_instance.delete_namespace(namespace_name) + except Exception as e: + pytest.fail("Error occured when deleting namespace: " + str(e)) diff --git a/test/e2e/src/common/kubernetes_node_utility.py b/test/e2e/src/common/kubernetes_node_utility.py new file mode 100644 index 000000000..050ce8b87 --- /dev/null +++ b/test/e2e/src/common/kubernetes_node_utility.py @@ -0,0 +1,12 @@ +import pytest + +def get_kubernetes_node_count(api_instance): + node_list = list_kubernetes_nodes(api_instance) + return len(node_list.items) + +def list_kubernetes_nodes(api_instance): + try: + return api_instance.list_node() + except Exception as e: + pytest.fail("Error occured while retrieving node information: " + str(e)) + diff --git a/test/e2e/src/common/kubernetes_pod_utility.py 
b/test/e2e/src/common/kubernetes_pod_utility.py new file mode 100644 index 000000000..27345fae7 --- /dev/null +++ b/test/e2e/src/common/kubernetes_pod_utility.py @@ -0,0 +1,65 @@ +import pytest +import time + +from kubernetes import watch +from kubernetes.stream import stream + +# Returns a kubernetes pod object in given namespace. Object description at: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1PodList.md +def get_pod(api_instance, namespace, pod_name): + try: + return api_instance.read_namespaced_pod(pod_name, namespace) + except Exception as e: + pytest.fail("Error occured when retrieving pod information: " + str(e)) + + +# Returns a list of kubernetes pod objects in a given namespace. Object description at: https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1PodList.md +def get_pod_list(api_instance, namespace, label_selector=""): + try: + return api_instance.list_namespaced_pod(namespace, label_selector=label_selector) + except Exception as e: + pytest.fail("Error occurred when retrieving pod information: " + str(e)) + +# get the content of the log file in the container via exec +def get_log_file_content(api_instance, namespace, podName, logfilePath): + try: + exec_command = ['tar','cf', '-', logfilePath] + return stream(api_instance.connect_get_namespaced_pod_exec, podName, namespace, command=exec_command, stderr=True, stdin=False, stdout=True, tty=False) + except Exception as e: + pytest.fail("Error occurred when retrieving log file content: " + str(e)) + +# Function that watches events corresponding to pods in the given namespace and passes the events to a callback function +def watch_pod_status(api_instance, namespace, timeout, callback=None): + if not callback: + return + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespaced_pod, namespace, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + pytest.fail("Error occurred when checking pod status: " 
+ str(e)) + pytest.fail("The watch on the pods has timed out. Please see the pod logs for more info.") + + +# Function that watches events corresponding to pod logs and passes them to a callback function +def watch_pod_logs(api_instance, namespace, pod_name, container_name, timeout_seconds, callback=None): + if not callback: + return + try: + w = watch.Watch() + timeout = time.time() + timeout_seconds + for event in w.stream(api_instance.read_namespaced_pod_log, pod_name, namespace, container=container_name): + if callback(event): + return + if time.time() > timeout: + pytest.fail("The watch on the pod logs has timed out.") + except Exception as e: + pytest.fail("Error occurred when checking pod logs: " + str(e)) + + +# Function that returns the pod logs of a given container. +def get_pod_logs(api_instance, pod_namespace, pod_name, container_name): + try: + return api_instance.read_namespaced_pod_log(pod_name, pod_namespace, container=container_name) + except Exception as e: + pytest.fail("Error occurred when fetching pod logs: " + str(e)) diff --git a/test/e2e/src/common/kubernetes_secret_utility.py b/test/e2e/src/common/kubernetes_secret_utility.py new file mode 100644 index 000000000..8cc07fd4d --- /dev/null +++ b/test/e2e/src/common/kubernetes_secret_utility.py @@ -0,0 +1,26 @@ +import sys + +from kubernetes import watch + + +# This function returns the kubernetes secret object present in a given namespace +def get_kubernetes_secret(api_instance, namespace, secret_name): + try: + return api_instance.read_namespaced_secret(secret_name, namespace) + except Exception as e: + sys.exit("Error occurred when retrieving secret '{}': ".format(secret_name) + str(e)) + + +# Function that watches events corresponding to kubernetes secrets and passes the events to a callback function +def watch_kubernetes_secret(api_instance, namespace, secret_name, timeout, callback=None): + if not callback: + return + field_selector = "metadata.name={}".format(secret_name) if secret_name 
else "" + try: + w = watch.Watch() + for event in w.stream(api_instance.list_namespaced_secret, namespace, field_selector=field_selector, timeout_seconds=timeout): + if callback(event): + return + except Exception as e: + sys.exit("Error occurred when watching kubernetes secret events: " + str(e)) + sys.exit("The watch on the kubernetes secret events has timed out. Please see the pod logs for more info.") diff --git a/test/e2e/src/common/kubernetes_service_utility.py b/test/e2e/src/common/kubernetes_service_utility.py new file mode 100644 index 000000000..694af885a --- /dev/null +++ b/test/e2e/src/common/kubernetes_service_utility.py @@ -0,0 +1,19 @@ +import pytest + +from kubernetes import watch + + +# Returns a list of services in a given namespace +def list_service(api_instance, namespace, field_selector="", label_selector=""): + try: + return api_instance.list_namespaced_service(namespace, field_selector=field_selector, label_selector=label_selector) + except Exception as e: + pytest.fail("Error occured when retrieving services: " + str(e)) + + +# Deletes a service +def delete_service(api_instance, namespace, service_name): + try: + return api_instance.delete_namespaced_service(service_name, namespace) + except Exception as e: + pytest.fail("Error occured when deleting service: " + str(e)) diff --git a/test/e2e/src/common/kubernetes_version_utility.py b/test/e2e/src/common/kubernetes_version_utility.py new file mode 100644 index 000000000..884d1df2f --- /dev/null +++ b/test/e2e/src/common/kubernetes_version_utility.py @@ -0,0 +1,9 @@ +import pytest + + +def get_kubernetes_server_version(api_instance): + try: + api_response = api_instance.get_code() + return api_response.git_version + except Exception as e: + pytest.fail("Error occured when retrieving kubernetes server version: " + str(e)) diff --git a/test/e2e/src/common/results_utility.py b/test/e2e/src/common/results_utility.py new file mode 100644 index 000000000..14066bf16 --- /dev/null +++ 
b/test/e2e/src/common/results_utility.py @@ -0,0 +1,24 @@ +import pytest +import shutil +import tarfile + +from pathlib import Path + + + +# Function to create the test result directory +def create_results_dir(results_dir): + print(results_dir) + try: + Path(results_dir).mkdir(parents=True, exist_ok=True) + except Exception as e: + pytest.fail("Unable to create the results directory: " + str(e)) + + +# Function to append logs from the test run into a result file +def append_result_output(message, result_file_path): + try: + with open(result_file_path, "a") as result_file: + result_file.write(message) + except Exception as e: + pytest.fail("Error while appending message '{}' to results file: ".format(message) + str(e)) diff --git a/test/e2e/src/core/Dockerfile b/test/e2e/src/core/Dockerfile new file mode 100644 index 000000000..9f85bdf4c --- /dev/null +++ b/test/e2e/src/core/Dockerfile @@ -0,0 +1,17 @@ +FROM python:3.6 + +RUN pip install --trusted-host pypi.org --trusted-host files.pythonhosted.org pytest pytest-xdist filelock requests kubernetes adal msrestazure + +RUN curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash \ + && helm version + +COPY ./core/e2e_tests.sh / +COPY ./core/pytest.ini /e2etests/ +COPY ./core/conftest.py /e2etests/ +COPY ./core/helper.py /e2etests/ +COPY ./core/ /e2etests/ +COPY ./common/ /e2etests/ +COPY ./tests/ /e2etests/ + +RUN ["chmod", "+x", "/e2e_tests.sh"] +ENTRYPOINT ["./e2e_tests.sh"] diff --git a/test/e2e/src/core/conftest.py b/test/e2e/src/core/conftest.py new file mode 100644 index 000000000..e659d5189 --- /dev/null +++ b/test/e2e/src/core/conftest.py @@ -0,0 +1,90 @@ +import pytest +import os +import time +import pickle + +import constants + +from filelock import FileLock +from pathlib import Path +from results_utility import create_results_dir, append_result_output + +pytestmark = pytest.mark.agentests + +# Fixture to collect all the environment variables, install pre-requisites. 
It will be run before the tests. +@pytest.fixture(scope='session', autouse=True) +def env_dict(): + my_file = Path("env.pkl") # File to store the environment variables. + with FileLock(str(my_file) + ".lock"): # Locking the file since each test will be run in parallel as separate subprocesses and may try to access the file simultaneously. + env_dict = {} + if not my_file.is_file(): + # Creating the results directory + create_results_dir('/tmp/results') + + # Setting some environment variables + env_dict['SETUP_LOG_FILE'] = '/tmp/results/setup' + env_dict['TEST_AGENT_LOG_FILE'] = '/tmp/results/containerinsights' + env_dict['NUM_TESTS_COMPLETED'] = 0 + + print("Starting setup...") + append_result_output("Starting setup...\n", env_dict['SETUP_LOG_FILE']) + + # Collecting environment variables + env_dict['TENANT_ID'] = os.getenv('TENANT_ID') + env_dict['CLIENT_ID'] = os.getenv('CLIENT_ID') + env_dict['CLIENT_SECRET'] = os.getenv('CLIENT_SECRET') + + # get default query time interval for log analytics queries + queryTimeInterval = int(os.getenv('DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES')) if os.getenv('DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES') else constants.DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES + # add minute suffix since this format required for LA queries + env_dict['DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES'] = str(queryTimeInterval) + "m" + + # get default query time interval for metrics queries + env_dict['DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES'] = int(os.getenv('DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES')) if os.getenv('DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES') else constants.DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES + + + # expected agent pod restart count + env_dict['AGENT_POD_EXPECTED_RESTART_COUNT'] = int(os.getenv('AGENT_POD_EXPECTED_RESTART_COUNT')) if os.getenv('AGENT_POD_EXPECTED_RESTART_COUNT') else constants.AGENT_POD_EXPECTED_RESTART_COUNT + + # default to azure public cloud if AZURE_CLOUD not specified + env_dict['AZURE_ENDPOINTS'] 
= constants.AZURE_CLOUD_DICT.get(os.getenv('AZURE_CLOUD')) if os.getenv('AZURE_CLOUD') else constants.AZURE_PUBLIC_CLOUD_ENDPOINTS + + if not env_dict.get('TENANT_ID'): + pytest.fail('ERROR: variable TENANT_ID is required.') + + if not env_dict.get('CLIENT_ID'): + pytest.fail('ERROR: variable CLIENT_ID is required.') + + if not env_dict.get('CLIENT_SECRET'): + pytest.fail('ERROR: variable CLIENT_SECRET is required.') + + print("Setup Complete.") + append_result_output("Setup Complete.\n", env_dict['SETUP_LOG_FILE']) + + with Path.open(my_file, "wb") as f: + pickle.dump(env_dict, f, pickle.HIGHEST_PROTOCOL) + else: + with Path.open(my_file, "rb") as f: + env_dict = pickle.load(f) + + yield env_dict + + my_file = Path("env.pkl") + with FileLock(str(my_file) + ".lock"): + with Path.open(my_file, "rb") as f: + env_dict = pickle.load(f) + + env_dict['NUM_TESTS_COMPLETED'] = 1 + env_dict.get('NUM_TESTS_COMPLETED') + if env_dict['NUM_TESTS_COMPLETED'] == int(os.getenv('NUM_TESTS')): + # Checking if cleanup is required. + if os.getenv('SKIP_CLEANUP'): + return + print('Starting cleanup...') + append_result_output("Starting Cleanup...\n", env_dict['SETUP_LOG_FILE']) + + print("Cleanup Complete.") + append_result_output("Cleanup Complete.\n", env_dict['SETUP_LOG_FILE']) + return + + with Path.open(my_file, "wb") as f: + pickle.dump(env_dict, f, pickle.HIGHEST_PROTOCOL) diff --git a/test/e2e/src/core/e2e_tests.sh b/test/e2e/src/core/e2e_tests.sh new file mode 100644 index 000000000..3bfafdce9 --- /dev/null +++ b/test/e2e/src/core/e2e_tests.sh @@ -0,0 +1,26 @@ +#!/bin/sh + +results_dir="${RESULTS_DIR:-/tmp/results}" + +# saveResults prepares the results for handoff to the Sonobuoy worker. +# See: https://github.com/vmware-tanzu/sonobuoy/blob/master/docs/plugins.md +saveResults() { + cd ${results_dir} + + # Sonobuoy worker expects a tar file. + tar czf results.tar.gz * + + # Signal to the worker that we are done and where to find the results. 
+ printf ${results_dir}/results.tar.gz > ${results_dir}/done +} + +# Ensure that we tell the Sonobuoy worker we are done regardless of results. +trap saveResults EXIT + +# The variable 'TEST_LIST' should be provided if we want to run specific tests. If not provided, all tests are run + +NUM_PROCESS=$(pytest /e2etests/ --collect-only -k "$TEST_NAME_LIST" -m "$TEST_MARKER_LIST" | grep " 0): + pytest.fail("numberMisscheduled shouldnt be greater than 0 for the daemonset {}.".format( + daemonset_name)) + + except Exception as e: + pytest.fail("Error occured while checking daemonset status: " + str(e)) + +# This function checks the status of kubernetes pods +def check_kubernetes_pods_status(pod_namespace, label_selector, expectedPodRestartCount, outfile=None): + try: + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, pod_namespace, label_selector) + append_result_output("podlist output {}\n".format(pod_list), outfile) + if not pod_list: + pytest.fail("pod_list shouldnt be null or empty") + pods = pod_list.items + if not pods: + pytest.fail("pod items shouldnt be null or empty") + if len(pods) <= 0: + pytest.fail("pod count should be greater than 0") + for pod in pods: + status = pod.status + podstatus = status.phase + if not podstatus: + pytest.fail("status should not be null or empty") + if podstatus != "Running": + pytest.fail("pod status should be in running state") + containerStatuses = status.container_statuses + if not containerStatuses: + pytest.fail("containerStatuses shouldnt be nil or empty") + if len(containerStatuses) <= 0: + pytest.fail("length containerStatuses should be greater than 0") + for containerStatus in containerStatuses: + containerId = containerStatus.container_id + if not containerId: + pytest.fail("containerId shouldnt be nil or empty") + image = containerStatus.image + if not image: + pytest.fail("image shouldnt be nil or empty") + imageId = containerStatus.image_id + if not imageId: + pytest.fail("imageId shouldnt be 
nil or empty") + restartCount = containerStatus.restart_count + if restartCount > expectedPodRestartCount: + pytest.fail("restartCount shouldnt be greater than expected pod restart count: {}".format(expectedPodRestartCount)) + ready = containerStatus.ready + if not ready: + pytest.fail("container status should be in ready state") + containerState = containerStatus.state + if not containerState.running: + pytest.fail("container state should be in running state") + except Exception as e: + pytest.fail("Error occured while checking pods status: " + str(e)) + + +def check_namespace_status_using_watch(outfile=None, namespace_list=None, timeout=300): + namespace_dict = {} + for namespace in namespace_list: + namespace_dict[namespace] = 0 + append_result_output( + "Namespace dict: {}\n".format(namespace_dict), outfile) + print("Generated the namespace dictionary.") + + # THe callback function to check the namespace status + def namespace_event_callback(event): + try: + append_result_output("{}\n".format(event), outfile) + namespace_name = event['raw_object'].get('metadata').get('name') + namespace_status = event['raw_object'].get('status') + if not namespace_status: + return False + if namespace_status.get('phase') == 'Active': + namespace_dict[namespace_name] = 1 + if all(ele == 1 for ele in list(namespace_dict.values())): + return True + return False + except Exception as e: + pytest.fail( + "Error occured while processing the namespace event: " + str(e)) + + # Checking the namespace status + api_instance = client.CoreV1Api() + watch_namespace(api_instance, timeout, namespace_event_callback) + +# This function checks the status of daemonset in a given namespace. The daemonset to be monitored are identified using the pod label list parameter. 
+def check_kubernetes_daemonset_status_using_watch(daemonset_namespace, outfile=None, daemonset_label_list=None, timeout=300): + daemonset_label_dict = {} + if daemonset_label_list: # This parameter is a list of label values to identify the daemonsets that we want to monitor in the given namespace + for daemonset_label in daemonset_label_list: + daemonset_label_dict[daemonset_label] = 0 + append_result_output("daemonset label dict: {}\n".format( + daemonset_label_dict), outfile) + print("Generated the daemonset dictionary.") + + # The callback function to check if the pod is in running state + def daemonset_event_callback(event): + try: + # append_result_output("{}\n".format(event), outfile) + daemonset_status = event['raw_object'].get('status') + daemonset_metadata = event['raw_object'].get('metadata') + daemonset_metadata_labels = daemonset_metadata.get('labels') + if not daemonset_metadata_labels: + return False + + # It contains the list of all label values for the pod whose event was called. 
+ daemonset_metadata_label_values = daemonset_metadata_labels.values() + # This label value will be common in pod event and label list provided and will be monitored + current_label_value = None + for label_value in daemonset_metadata_label_values: + if label_value in daemonset_label_dict: + current_label_value = label_value + if not current_label_value: + return False + + currentNumberScheduled = daemonset_status.get( + 'currentNumberScheduled') + desiredNumberScheduled = daemonset_status.get( + 'desiredNumberScheduled') + numberAvailable = daemonset_status.get('numberAvailable') + numberReady = daemonset_status.get('numberReady') + numberMisscheduled = daemonset_status.get('numberMisscheduled') + + if (currentNumberScheduled != desiredNumberScheduled): + pytest.fail("currentNumberScheduled doesnt match with currentNumberScheduled for the daemonset {}.".format( + daemonset_metadata.get('name'))) + + if (numberAvailable != numberReady): + pytest.fail("numberAvailable doesnt match with expected numberReady for the daemonset {}.".format( + daemonset_metadata.get('name'))) + + if (numberMisscheduled > 0): + pytest.fail("numberMisscheduled is greater than 0 for the daemonset {}.".format( + daemonset_metadata.get('name'))) + + return True + except Exception as e: + print("Error occured while processing the pod event: " + str(e)) + + # Checking status of all pods + if daemonset_label_dict: + api_instance = client.AppsV1Api() + watch_daemon_set_status( + api_instance, daemonset_namespace, timeout, daemonset_event_callback) + +# This function checks the status of deployment in a given namespace. The deployment to be monitored are identified using the pod label list parameter. 
+def check_kubernetes_deployments_status_using_watch(deployment_namespace, outfile=None, deployment_label_list=None, timeout=300): + deployment_label_dict = {} + if deployment_label_list: # This parameter is a list of label values to identify the deployments that we want to monitor in the given namespace + for deployment_label in deployment_label_list: + deployment_label_dict[deployment_label] = 0 + append_result_output("Deployment label dict: {}\n".format( + deployment_label_dict), outfile) + print("Generated the deployment dictionary.") + + # The callback function to check if the pod is in running state + def deployment_event_callback(event): + try: + # append_result_output("{}\n".format(event), outfile) + deployment_status = event['raw_object'].get('status') + deployment_metadata = event['raw_object'].get('metadata') + deployment_metadata_labels = deployment_metadata.get('labels') + if not deployment_metadata_labels: + return False + + # It contains the list of all label values for the deployment whose event was called. 
+ deployment_metadata_label_values = deployment_metadata_labels.values() + # This label value will be common in deployment event and label list provided and will be monitored + current_label_value = None + for label_value in deployment_metadata_label_values: + if label_value in deployment_label_dict: + current_label_value = label_value + if not current_label_value: + return False + + availableReplicas = deployment_status.get('availableReplicas') + readyReplicas = deployment_status.get('readyReplicas') + replicas = deployment_status.get('replicas') + + if (replicas != availableReplicas): + pytest.fail("availableReplicas doesnt match with expected replicas for the deployment {}.".format( + deployment_metadata.get('name'))) + + if (replicas != readyReplicas): + pytest.fail("readyReplicas doesnt match with expected replicas for the deployment {}.".format( + deployment_metadata.get('name'))) + + return True + except Exception as e: + print("Error occured while processing the pod event: " + str(e)) + + # Checking status of all pods + if deployment_label_dict: + api_instance = client.AppsV1Api() + watch_deployment_status( + api_instance, deployment_namespace, timeout, deployment_event_callback) + +# This function checks the status of pods in a given namespace. The pods to be monitored are identified using the pod label list parameter. 
+def check_kubernetes_pods_status_using_watch(pod_namespace, outfile=None, pod_label_list=None, timeout=300): + pod_label_dict = {} + if pod_label_list: # This parameter is a list of label values to identify the pods that we want to monitor in the given namespace + for pod_label in pod_label_list: + pod_label_dict[pod_label] = 0 + append_result_output( + "Pod label dict: {}\n".format(pod_label_dict), outfile) + print("Generated the pods dictionary.") + + # The callback function to check if the pod is in running state + def pod_event_callback(event): + try: + # append_result_output("{}\n".format(event), outfile) + pod_status = event['raw_object'].get('status') + pod_metadata = event['raw_object'].get('metadata') + pod_metadata_labels = pod_metadata.get('labels') + if not pod_metadata_labels: + return False + + # It contains the list of all label values for the pod whose event was called. + pod_metadata_label_values = pod_metadata_labels.values() + # This label value will be common in pod event and label list provided and will be monitored + current_label_value = None + for label_value in pod_metadata_label_values: + if label_value in pod_label_dict: + current_label_value = label_value + if not current_label_value: + return False + + if pod_status.get('containerStatuses'): + for container in pod_status.get('containerStatuses'): + if container.get('restartCount') > 0: + pytest.fail("The pod {} was restarted. 
Please see the pod logs for more info.".format( + container.get('name'))) + if not container.get('state').get('running'): + pod_label_dict[current_label_value] = 0 + return False + else: + pod_label_dict[current_label_value] = 1 + if all(ele == 1 for ele in list(pod_label_dict.values())): + return True + return False + except Exception as e: + pytest.fail( + "Error occured while processing the pod event: " + str(e)) + + # Checking status of all pods + if pod_label_dict: + api_instance = client.CoreV1Api() + watch_pod_status(api_instance, pod_namespace, + timeout, pod_event_callback) + + +# Function to check if the crd instance status has been updated with the status fields mentioned in the 'status_list' parameter +def check_kubernetes_crd_status_using_watch(crd_group, crd_version, crd_namespace, crd_plural, crd_name, status_dict={}, outfile=None, timeout=300): + # The callback function to check if the crd event received has been updated with the status fields + def crd_event_callback(event): + try: + append_result_output("{}\n".format(event), outfile) + crd_status = event['raw_object'].get('status') + if not crd_status: + return False + for status_field in status_dict: + if not crd_status.get(status_field): + return False + if crd_status.get(status_field) != status_dict.get(status_field): + pytest.fail( + "The CRD instance status has been updated with incorrect value for '{}' field.".format(status_field)) + return True + except Exception as e: + pytest.fail("Error occured while processing crd event: " + str(e)) + + # Checking if CRD instance has been updated with status fields + api_instance = client.CustomObjectsApi() + watch_crd_instance(api_instance, crd_group, crd_version, crd_namespace, + crd_plural, crd_name, timeout, crd_event_callback) + + +# Function to monitor the pod logs. It will ensure that are logs passed in the 'log_list' parameter are present in the container logs. 
+def check_kubernetes_pod_logs_using_watch(pod_namespace, pod_name, container_name, logs_list=None, error_logs_list=None, outfile=None, timeout=300): + logs_dict = {} + for log in logs_list: + logs_dict[log] = 0 + print("Generated the logs dictionary.") + + # The callback function to examine the pod log + def pod_log_event_callback(event): + try: + append_result_output("{}\n".format(event), outfile) + for error_log in error_logs_list: + if error_log in event: + pytest.fail("Error log found: " + event) + for log in logs_dict: + if log in event: + logs_dict[log] = 1 + if all(ele == 1 for ele in list(logs_dict.values())): + return True + return False + except Exception as e: + pytest.fail( + "Error occured while processing pod log event: " + str(e)) + + # Checking the pod logs + api_instance = client.CoreV1Api() + watch_pod_logs(api_instance, pod_namespace, pod_name, + container_name, timeout, pod_log_event_callback) + +# Function to monitor the kubernetes secret. It will determine if the secret has been successfully created. 
+def check_kubernetes_secret_using_watch(secret_namespace, secret_name, timeout=300): + # The callback function to check if the secret event received has secret data + def secret_event_callback(event): + try: + secret_data = event['raw_object'].get('data') + if not secret_data: + return False + return True + except Exception as e: + pytest.fail( + "Error occured while processing secret event: " + str(e)) + + # Checking the kubernetes secret + api_instance = client.CoreV1Api() + watch_kubernetes_secret(api_instance, secret_namespace, + secret_name, timeout, secret_event_callback) diff --git a/test/e2e/src/core/pytest.ini b/test/e2e/src/core/pytest.ini new file mode 100644 index 000000000..f4dc462f0 --- /dev/null +++ b/test/e2e/src/core/pytest.ini @@ -0,0 +1,4 @@ +[pytest] +markers = + agentests: marks tests are a part of arc agent conformance tests (deselect with '-m "not agentests"') + \ No newline at end of file diff --git a/test/e2e/src/tests/test_ds_workflows.py b/test/e2e/src/tests/test_ds_workflows.py new file mode 100755 index 000000000..81ef08325 --- /dev/null +++ b/test/e2e/src/tests/test_ds_workflows.py @@ -0,0 +1,60 @@ +import pytest +import constants + +from kubernetes import client, config +from kubernetes_pod_utility import get_pod_list, get_log_file_content +from results_utility import append_result_output +from helper import check_kubernetes_deployment_status +from helper import check_kubernetes_daemonset_status +from helper import check_kubernetes_pods_status +from kubernetes.stream import stream + +pytestmark = pytest.mark.agentests + +# validation of ds agent workflows +def test_ds_workflows(env_dict): + print("Starting daemonset agent workflows test.") + append_result_output("test_ds_workflows start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + print("getting daemonset pod list") + 
api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DAEMON_SET_PODS_LABEL_SELECTOR) + if not pod_list: + pytest.fail("daemonset pod_list shouldnt be null or empty") + + if len(pod_list.items) <= 0: + pytest.fail("number of items in daemonset pod list should be greater than 0") + + for podItem in pod_list.items: + podName = podItem.metadata.name + logcontent = get_log_file_content( + api_instance, constants.AGENT_RESOURCES_NAMESPACE, podName, constants.AGENT_OMSAGENT_LOG_PATH) + if not logcontent: + pytest.fail("logcontent should not be null or empty for pod: " + podName) + loglines = logcontent.split("\n") + if len(loglines) <= 0: + pytest.fail("number of log lines should be greater than 0 for pod :" + podName) + + IsContainerPerfEmitStream = False + IsContainerInventoryStream = False + for line in loglines: + if line.find(constants.CONTAINER_PERF_EMIT_STREAM) >= 0: + IsContainerPerfEmitStream = True + if line.find(constants.CONTAINER_INVENTORY_EMIT_STREAM) >= 0: + IsContainerInventoryStream = True + + if IsContainerPerfEmitStream == False: + pytest.fail("ContainerPerf stream not emitted successfully from pod:" + podName) + if IsContainerInventoryStream == False: + pytest.fail("ContainerInventory stream not emitted successfully from pod:" + podName) + + append_result_output("test_ds_workflows end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully completed daemonset workflows test.") diff --git a/test/e2e/src/tests/test_e2e_workflows.py b/test/e2e/src/tests/test_e2e_workflows.py new file mode 100755 index 000000000..11a8e18e3 --- /dev/null +++ b/test/e2e/src/tests/test_e2e_workflows.py @@ -0,0 +1,330 @@ +import pytest +import constants +import requests + +from arm_rest_utility import fetch_aad_token +from kubernetes import client, config +from kubernetes_pod_utility import get_pod_list +from results_utility import append_result_output + + +pytestmark = pytest.mark.agentests + +# 
validation of workflows e2e +def test_e2e_workflows(env_dict): + print("Starting e2e workflows test.") + append_result_output("test_e2e_workflows start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + # query time interval for LA queries + queryTimeInterval = env_dict['DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES'] + if not queryTimeInterval: + pytest.fail("DEFAULT_QUERY_TIME_INTERVAL_IN_MINUTES should not be null or empty") + + # get the cluster resource id from replicaset pod envvars + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR) + + if not pod_list: + pytest.fail("pod_list shouldnt be null or empty") + + if len(pod_list.items) <= 0: + pytest.fail("number of items in pod list should be greater than 0") + + envVars = pod_list.items[0].spec.containers[0].env + if not envVars: + pytest.fail("environment variables should be defined in the replicaset pod") + + clusterResourceId = '' + for env in envVars: + if env.name == "AKS_RESOURCE_ID": + clusterResourceId = env.value + print("cluster resource id: {}".format(clusterResourceId)) + + if not clusterResourceId: + pytest.fail("failed to get clusterResourceId from replicaset pod environment variables") + + # fetch AAD token for log analytics resource for the queries + tenant_id = env_dict.get('TENANT_ID') + authority_uri = env_dict.get('AZURE_ENDPOINTS').get('activeDirectory') + tenant_id + client_id = env_dict.get('CLIENT_ID') + client_secret = env_dict.get('CLIENT_SECRET') + resource = env_dict.get('AZURE_ENDPOINTS').get('logAnalytics') + aad_token = fetch_aad_token(client_id, client_secret, authority_uri, resource) + if not aad_token: + pytest.fail("failed to fetch AAD token") + + access_token = aad_token.get('accessToken') + if not access_token: + 
pytest.fail("access_token shouldnt be null or empty") + + # validate e2e workflows by checking data in log analytics workspace through resource centric queries + queryUrl = resource + "/v1" + clusterResourceId + "/query" + Headers = { + "Authorization": str("Bearer " + access_token), + "Content-Type": "application/json" + } + # KubePodInventory + query = constants.KUBE_POD_INVENTORY_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('KUBE_POD_INVENTORY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} and workflow: {1}".format(clusterResourceId, 'KUBE_POD_INVENTORY')) + + # KubeNodeInventory + query = constants.KUBE_NODE_INVENTORY_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('KUBE_NODE_INVENTORY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'KUBE_NODE_INVENTORY')) + + # KubeServices + query = constants.KUBE_SERVICES_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('KUBE_SERVICES')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'KUBE_SERVICES')) + + # KubeEvents + query = 
constants.KUBE_EVENTS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('KUBE_EVENTS')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'KUBE_EVENTS')) + + # Container Node Inventory + query = constants.CONTAINER_NODE_INVENTORY_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_NODE_INVENTORY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_NODE_INVENTORY')) + + # Node Perf + # cpu capacity + query = constants.NODE_PERF_CPU_CAPCITY_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_CPU_CAPCITY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_CPU_CAPCITY')) + + # memory capacity + query = constants.NODE_PERF_MEMORY_CAPCITY_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_MEMORY_CAPCITY')) + + rowCount = 
result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_MEMORY_CAPCITY')) + + # cpu allocatable + query = constants.NODE_PERF_CPU_ALLOCATABLE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_CPU_ALLOCATABLE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_CPU_ALLOCATABLE')) + + # memory allocatable + query = constants.NODE_PERF_MEMORY_ALLOCATABLE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_MEMORY_ALLOCATABLE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_MEMORY_ALLOCATABLE')) + + # cpu usage + query = constants.NODE_PERF_CPU_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_CPU_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_CPU_USAGE')) + + # memory rss usage + query = constants.NODE_PERF_MEMORY_RSS_USAGE_QUERY.format(queryTimeInterval) + params 
= { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_MEMORY_RSS_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_MEMORY_RSS_USAGE')) + + # memory ws usage + query = constants.NODE_PERF_MEMORY_WS_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_MEMORY_WS_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_MEMORY_WS_USAGE')) + + # restartime epoch + query = constants.NODE_PERF_RESTART_TIME_EPOCH_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('NODE_PERF_RESTART_TIME_EPOCH')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'NODE_PERF_RESTART_TIME_EPOCH')) + + # Container Perf + # container cpu limits + query = constants.CONTAINER_PERF_CPU_LIMITS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_CPU_LIMITS')) + + rowCount = 
result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_CPU_LIMITS')) + + # container memory limits + query = constants.CONTAINER_PERF_MEMORY_LIMITS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_MEMORY_LIMITS')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_MEMORY_LIMITS')) + + # cpu requests + query = constants.CONTAINER_PERF_CPU_REQUESTS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_CPU_REQUESTS')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_CPU_REQUESTS')) + + # memory requests + query = constants.CONTAINER_PERF_MEMORY_REQUESTS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_MEMORY_REQUESTS_QUERY')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_MEMORY_REQUESTS')) + + # cpu usage + query = 
constants.CONTAINER_PERF_CPU_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_CPU_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_CPU_USAGE')) + + # memory rss usage + query = constants.CONTAINER_PERF_MEMORY_RSS_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_MEMORY_RSS_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_MEMORY_RSS_USAGE')) + + # memory ws usage + query = constants.CONTAINER_PERF_MEMORY_WS_USAGE_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_PERF_MEMORY_WS_USAGE')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_MEMORY_WS_USAGE')) + + # restart time epoch + query = constants.CONTAINER_PERF_RESTART_TIME_EPOCH_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null 
or empty for workflow: {0}".format('CONTAINER_PERF_RESTART_TIME_EPOCH')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_PERF_RESTART_TIME_EPOCH')) + + # Container log + query = constants.CONTAINER_LOG_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('CONTAINER_LOG')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'CONTAINER_LOG')) + + # InsightsMetrics + query = constants.INSIGHTS_METRICS_QUERY.format(queryTimeInterval) + params = { 'query': query} + result = requests.get(queryUrl, params=params, headers=Headers, verify=False) + if not result: + pytest.fail("log analytics query response shouldnt be null or empty for workflow: {0}".format('INSIGHTS_METRICS')) + + rowCount = result.json()['tables'][0]['rows'][0][0] + if not rowCount: + pytest.fail("rowCount should be greater than for cluster: {0} for workflow: {1} ".format(clusterResourceId, 'INSIGHTS_METRICS')) + + append_result_output("test_e2e_workflows end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully completed e2e workflows test.") diff --git a/test/e2e/src/tests/test_node_metrics_e2e_workflow.py b/test/e2e/src/tests/test_node_metrics_e2e_workflow.py new file mode 100755 index 000000000..4346f89a8 --- /dev/null +++ b/test/e2e/src/tests/test_node_metrics_e2e_workflow.py @@ -0,0 +1,420 @@ +import pytest +import constants +import requests + +from arm_rest_utility import fetch_aad_token +from kubernetes import client, config +from kubernetes_pod_utility import get_pod_list +from results_utility import 
append_result_output +from datetime import datetime, timedelta + +pytestmark = pytest.mark.agentests + +# validation of node metrics e2e workflow +def test_node_metrics_e2e_workflow(env_dict): + print("Starting node metrics e2e workflow test.") + append_result_output("test_node_metrics_e2e_workflow start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + # query time interval for metric queries + metricQueryIntervalInMins = env_dict['DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES'] + if not metricQueryIntervalInMins: + pytest.fail( + "DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES should not be null or empty or 0") + + # get the cluster resource id from replicaset pod envvars + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR) + + if not pod_list: + pytest.fail("pod_list shouldnt be null or empty") + + if len(pod_list.items) <= 0: + pytest.fail("number of items in pod list should be greater than 0") + + envVars = pod_list.items[0].spec.containers[0].env + if not envVars: + pytest.fail( + "environment variables should be defined in the replicaset pod") + + clusterResourceId = '' + for env in envVars: + if env.name == "AKS_RESOURCE_ID": + clusterResourceId = env.value + print("cluster resource id: {}".format(clusterResourceId)) + + if not clusterResourceId: + pytest.fail( + "failed to get clusterResourceId from replicaset pod environment variables") + + # fetch AAD token for metric queries + tenant_id = env_dict.get('TENANT_ID') + authority_uri = env_dict.get('AZURE_ENDPOINTS').get( + 'activeDirectory') + tenant_id + client_id = env_dict.get('CLIENT_ID') + client_secret = env_dict.get('CLIENT_SECRET') + resourceManager = env_dict.get('AZURE_ENDPOINTS').get('resourceManager') + aad_token = 
fetch_aad_token( + client_id, client_secret, authority_uri, resourceManager) + if not aad_token: + pytest.fail("failed to fetch AAD token") + + access_token = aad_token.get('accessToken') + if not access_token: + pytest.fail("access_token shouldnt be null or empty") + + # validate metrics e2e workflow + now = datetime.utcnow() + endtime = now.isoformat()[:-3]+'Z' + starttime = (now - timedelta(hours=0, + minutes=constants.DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES)).isoformat()[:-3]+'Z' + Headers = { + "Authorization": str("Bearer " + access_token), + "Content-Type": "application/json", + "content-length": "0" + } + params = {} + # node metric - memoryRssBytes + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_MEMORY_RSS_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail( + "response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be 
greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_MEMORY_RSS_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_RSS_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_MEMORY_RSS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_RSS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + # node metric - memoryRssPercentage + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail( + "response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON 
shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_RSS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + # node metric - memoryWorkingSetBytes + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_MEMORY_WS_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail("response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the 
response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_MEMORY_WS_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_WS_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_MEMORY_WS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_WS_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + # node metric - memoryWorkingSetPercentage + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail("response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + 
if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_MEMORY_WS_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + # node metric - cpuUsageMilliCores + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail("response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format(response.status_code)) + + 
responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_CPU_USAGE_MILLI_CORES_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + # node metric - cpuUsagePercentage + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail("response of the metrics query API 
shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format(response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_CPU_USAGE_PERCENTAGE_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + # node metric - nodesCount + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.NODE_COUNT_METRIC_NAME, + constants.NODE_METRIC_METRIC_AGGREGATION, + constants.NODE_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = 
requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail("response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format(response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.NODE_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the response".format( + namespace, constants.NODE_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.NODE_COUNT_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.NODE_COUNT_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.NODE_COUNT_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.NODE_COUNT_METRIC_NAME, constants.NODE_METRICS_NAMESPACE)) + + append_result_output("test_node_metrics_e2e_workflow end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully completed node metrics e2e workflow test.") diff --git a/test/e2e/src/tests/test_pod_metrics_e2e_workflow.py b/test/e2e/src/tests/test_pod_metrics_e2e_workflow.py new file mode 100755 index 000000000..cd4260f76 --- /dev/null +++ 
b/test/e2e/src/tests/test_pod_metrics_e2e_workflow.py @@ -0,0 +1,134 @@ +import pytest +import constants +import requests + +from arm_rest_utility import fetch_aad_token +from kubernetes import client, config +from kubernetes_pod_utility import get_pod_list +from results_utility import append_result_output +from datetime import datetime, timedelta + +pytestmark = pytest.mark.agentests + +# validation of pod metrics e2e workflows +def test_pod_metrics_e2e_workflow(env_dict): + print("Starting pod metrics e2e workflows test.") + append_result_output("test_pod_metrics_e2e_workflow start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + # query time interval for metrics queries + metricQueryIntervalInMins = env_dict['DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES'] + if not metricQueryIntervalInMins: + pytest.fail( + "DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES should not be null or empty or 0") + + # get the cluster resource id from replicaset pod envvars + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR) + + if not pod_list: + pytest.fail("pod_list shouldnt be null or empty") + + if len(pod_list.items) <= 0: + pytest.fail("number of items in pod list should be greater than 0") + + envVars = pod_list.items[0].spec.containers[0].env + if not envVars: + pytest.fail( + "environment variables should be defined in the replicaset pod") + + clusterResourceId = '' + for env in envVars: + if env.name == "AKS_RESOURCE_ID": + clusterResourceId = env.value + print("cluster resource id: {}".format(clusterResourceId)) + + if not clusterResourceId: + pytest.fail( + "failed to get clusterResourceId from replicaset pod environment variables") + + # fetch AAD token for metrics queries + tenant_id = 
env_dict.get('TENANT_ID') + authority_uri = env_dict.get('AZURE_ENDPOINTS').get( + 'activeDirectory') + tenant_id + client_id = env_dict.get('CLIENT_ID') + client_secret = env_dict.get('CLIENT_SECRET') + resourceManager = env_dict.get('AZURE_ENDPOINTS').get('resourceManager') + aad_token = fetch_aad_token( + client_id, client_secret, authority_uri, resourceManager) + if not aad_token: + pytest.fail("failed to fetch AAD token") + + access_token = aad_token.get('accessToken') + if not access_token: + pytest.fail("access_token shouldnt be null or empty") + + # validate metrics e2e workflow + now = datetime.utcnow() + endtime = now.isoformat()[:-3]+'Z' + starttime = (now - timedelta(hours=0, + minutes=constants.DEFAULT_METRICS_QUERY_TIME_INTERVAL_IN_MINUTES)).isoformat()[:-3]+'Z' + Headers = { + "Authorization": str("Bearer " + access_token), + "Content-Type": "application/json", + "content-length": "0" + } + params = {} + # pod metric - PodCount + custommetricsUrl = '{0}{1}/providers/microsoft.Insights/metrics?timespan={2}/{3}&interval=FULL&metricnames={4}&aggregation={5}&metricNamespace={6}&validatedimensions=false&api-version={7}'.format( + resourceManager.rstrip("/"), + clusterResourceId, + starttime, + endtime, + constants.POD_COUNT_METRIC_NAME, + constants.POD_METRIC_METRIC_AGGREGATION, + constants.POD_METRICS_NAMESPACE, + constants.METRICS_API_VERSION) + + response = requests.get(custommetricsUrl, params=params, + headers=Headers, verify=False) + + if not response: + pytest.fail( + "response of the metrics query API shouldnt be null or empty") + + if response.status_code != 200: + pytest.fail("metrics query API failed with an error code: {}".format( + response.status_code)) + + responseJSON = response.json() + if not responseJSON: + pytest.fail("response JSON shouldnt be null or empty") + + namespace = responseJSON['namespace'] + if namespace != constants.POD_METRICS_NAMESPACE: + pytest.fail("got the namespace: {0} but expected namespace:{1} in the 
response".format( + namespace, constants.POD_METRICS_NAMESPACE)) + + responseValues = responseJSON['value'] + if not responseValues: + pytest.fail("response JSON shouldnt be null or empty") + + if len(responseValues) <= 0: + pytest.fail("length of value array in the response should be greater than 0") + + for responseVal in responseValues: + metricName = responseVal['name']['value'] + if metricName != constants.POD_COUNT_METRIC_NAME: + pytest.fail("got the metricname: {0} but expected metricname:{1} in the response".format(metricName, constants.POD_COUNT_METRIC_NAME)) + timeseries = responseVal['timeseries'] + if not timeseries: + pytest.fail("metric series shouldnt be null or empty for metric:{0} in namespace: {1}".format( + constants.POD_COUNT_METRIC_NAME, constants.POD_METRICS_NAMESPACE)) + if len(timeseries) <= 0: + pytest.fail("length of timeseries should be greater than for 0 for metric: {0} in namespace :{1}".format(constants.POD_COUNT_METRIC_NAME, constants.POD_METRICS_NAMESPACE)) + + append_result_output("test_pod_metrics_e2e_workflow end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully completed e2e workflows test.") diff --git a/test/e2e/src/tests/test_resource_status.py b/test/e2e/src/tests/test_resource_status.py new file mode 100755 index 000000000..bb63dac7c --- /dev/null +++ b/test/e2e/src/tests/test_resource_status.py @@ -0,0 +1,43 @@ +import pytest +import constants + +from kubernetes import client, config +from results_utility import append_result_output +from helper import check_kubernetes_deployment_status +from helper import check_kubernetes_daemonset_status +from helper import check_kubernetes_pods_status + +pytestmark = pytest.mark.agentests + +# validate all the critical resources such as ds, rs, ds pods and rs pod etc. 
are up and running +def test_resource_status(env_dict): + print("Starting resource status check.") + append_result_output("test_resource_status start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + #config.load_kube_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + # checking the deployment status + check_kubernetes_deployment_status( + constants.AGENT_RESOURCES_NAMESPACE, constants.AGENT_DEPLOYMENT_NAME, env_dict['TEST_AGENT_LOG_FILE']) + + # checking the daemonset status + check_kubernetes_daemonset_status( + constants.AGENT_RESOURCES_NAMESPACE, constants.AGENT_DAEMONSET_NAME, env_dict['TEST_AGENT_LOG_FILE']) + + expectedPodRestartCount = env_dict['AGENT_POD_EXPECTED_RESTART_COUNT'] + # checking deployment pod status + check_kubernetes_pods_status(constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR, expectedPodRestartCount, env_dict['TEST_AGENT_LOG_FILE']) + + # checking daemonset pod status + check_kubernetes_pods_status(constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DAEMON_SET_PODS_LABEL_SELECTOR, expectedPodRestartCount, env_dict['TEST_AGENT_LOG_FILE']) + + append_result_output("test_resource_status end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully checked resource status check.") diff --git a/test/e2e/src/tests/test_rs_workflows.py b/test/e2e/src/tests/test_rs_workflows.py new file mode 100755 index 000000000..aef422171 --- /dev/null +++ b/test/e2e/src/tests/test_rs_workflows.py @@ -0,0 +1,93 @@ +import pytest +import constants + +from kubernetes import client, config +from kubernetes_pod_utility import get_pod_list, get_log_file_content +from results_utility import append_result_output +from helper import check_kubernetes_deployment_status +from helper import check_kubernetes_daemonset_status +from helper import check_kubernetes_pods_status +from kubernetes.stream import stream + 
+pytestmark = pytest.mark.agentests + +# validation of replicaset agent workflows +def test_rs_workflows(env_dict): + print("Starting replicaset agent workflows test.") + append_result_output("test_rs_workflows start \n", + env_dict['TEST_AGENT_LOG_FILE']) + # Loading in-cluster kube-config + try: + config.load_incluster_config() + except Exception as e: + pytest.fail("Error loading the in-cluster config: " + str(e)) + + print("getting pod list") + api_instance = client.CoreV1Api() + pod_list = get_pod_list(api_instance, constants.AGENT_RESOURCES_NAMESPACE, + constants.AGENT_DEPLOYMENT_PODS_LABEL_SELECTOR) + if not pod_list: + pytest.fail("pod_list shouldnt be null or empty") + + if len(pod_list.items) <= 0: + pytest.fail("number of items in pod list should be greater than 0") + + rspodName = pod_list.items[0].metadata.name + if not rspodName: + pytest.fail("replicaset pod name should not be null or empty") + + logcontent = get_log_file_content( + api_instance, constants.AGENT_RESOURCES_NAMESPACE, rspodName, constants.AGENT_OMSAGENT_LOG_PATH) + if not logcontent: + pytest.fail("logcontent should not be null or empty for rs pod: {}".format(rspodName)) + loglines = logcontent.split("\n") + if len(loglines) <= 0: + pytest.fail("number of log lines should be greater than 0") + + IsKubePodInventorySuccessful = False + IsKubeNodeInventorySuccessful = False + IsKubeDeploymentInventorySuccessful = False + IsKubeContainerPerfInventorySuccessful = False + IsKubeServicesInventorySuccessful = False + IsContainerNodeInventorySuccessful = False + IsKubeEventsSuccessful = False + for line in loglines: + if line.find(constants.KUBE_POD_INVENTORY_EMIT_STREAM) >= 0: + IsKubePodInventorySuccessful = True + if line.find(constants.KUBE_NODE_INVENTORY_EMIT_STREAM) >= 0: + IsKubeNodeInventorySuccessful = True + if line.find(constants.KUBE_DEPLOYMENT_INVENTORY_EMIT_STREAM) >= 0: + IsKubeDeploymentInventorySuccessful = True + if line.find(constants.KUBE_CONTAINER_PERF_EMIT_STREAM) >= 0: + 
IsKubeContainerPerfInventorySuccessful = True + if line.find(constants.KUBE_SERVICES_EMIT_STREAM) >= 0: + IsKubeServicesInventorySuccessful = True + if line.find(constants.KUBE_CONTAINER_NODE_INVENTORY_EMIT_STREAM) >= 0: + IsContainerNodeInventorySuccessful = True + if line.find(constants.KUBE_EVENTS_EMIT_STREAM) >= 0: + IsKubeEventsSuccessful = True + + if IsKubePodInventorySuccessful == False: + pytest.fail("KubePodInventory stream not emitted successfully from pod:" + rspodName) + + if IsKubeNodeInventorySuccessful == False: + pytest.fail("KubeNodeInventory stream not emitted successfully from pod:" + rspodName) + + if IsKubeDeploymentInventorySuccessful == False: + pytest.fail("KubeDeploymentInventory stream not emitted successfully from pod:" + rspodName) + + if IsKubeContainerPerfInventorySuccessful == False: + pytest.fail("KubeContainerPerfInventory stream not emitted successfully from pod:" + rspodName) + + if IsKubeServicesInventorySuccessful == False: + pytest.fail("KubeServicesInventory stream not emitted successfully from pod:" + rspodName) + + if IsContainerNodeInventorySuccessful == False: + pytest.fail("ContainerNodeInventory stream not emitted successfully from pod:" + rspodName) + + if IsKubeEventsSuccessful == False: + pytest.fail("KubeEventsInventory stream not emitted successfully from rs pod:" + rspodName) + + append_result_output("test_rs_workflows end \n", + env_dict['TEST_AGENT_LOG_FILE']) + print("Successfully completed replicaset workflows test.") From 539da2f9a72ca4034d8e23823dd545058499ca11 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Fri, 26 Mar 2021 15:42:35 -0700 Subject: [PATCH 10/18] ci_dev to ci_prod merge for March release (#520) --- README.md | 1 + ReleaseNotes.md | 108 +++++ ReleaseProcess.md | 36 +- .../scripts/tomlparser-prom-customconfig.rb | 423 ++++++++++++++++++ .../installer/conf/prometheus-side-car.conf | 4 + .../conf/td-agent-bit-prom-side-car.conf | 28 ++ .../conf/telegraf-prom-side-car.conf | 162 +++++++ 
build/linux/installer/conf/telegraf-rs.conf | 23 +- build/linux/installer/conf/telegraf.conf | 4 +- .../installer/datafiles/base_container.data | 28 +- .../linux/installer/scripts/livenessprobe.sh | 27 +- .../scripts/tomlparser-osm-config.rb | 168 +++++++ .../scripts/tomlparser-prom-customconfig.rb | 267 ----------- build/version | 4 +- build/windows/installer/conf/fluent-bit.conf | 9 + build/windows/installer/conf/telegraf.conf | 162 +++++++ charts/azuremonitor-containers/Chart.yaml | 2 +- .../templates/omsagent-daemonset-windows.yaml | 6 + .../templates/omsagent-daemonset.yaml | 1 + .../templates/omsagent-deployment.yaml | 12 +- .../templates/omsagent-rbac.yaml | 2 +- .../templates/omsagent-secret.yaml | 14 +- charts/azuremonitor-containers/values.yaml | 12 +- kubernetes/container-azm-ms-agentconfig.yaml | 11 + kubernetes/container-azm-ms-osmconfig.yaml | 17 + kubernetes/linux/Dockerfile | 4 +- kubernetes/linux/defaultpromenvvariables-rs | 19 +- .../linux/defaultpromenvvariables-sidecar | 9 + kubernetes/linux/main.sh | 209 ++++++--- kubernetes/linux/setup.sh | 8 +- kubernetes/omsagent.yaml | 87 +++- kubernetes/windows/Dockerfile | 6 +- kubernetes/windows/main.ps1 | 113 +++-- .../setdefaulttelegrafenvvariables.ps1 | 17 + kubernetes/windows/setup.ps1 | 16 + .../windows/install-build-pre-requisites.ps1 | 6 +- .../onboarding/managed/disable-monitoring.sh | 2 +- .../onboarding/managed/enable-monitoring.ps1 | 2 +- .../onboarding/managed/enable-monitoring.sh | 16 +- .../onboarding/managed/upgrade-monitoring.sh | 6 +- .../existingClusterOnboarding.json | 5 +- source/plugins/go/src/oms.go | 2 +- source/plugins/go/src/telemetry.go | 157 +++++-- .../plugins/ruby/CAdvisorMetricsAPIClient.rb | 2 +- .../plugins/ruby/arc_k8s_cluster_identity.rb | 45 +- source/plugins/ruby/in_kube_nodes.rb | 6 + 46 files changed, 1781 insertions(+), 487 deletions(-) create mode 100644 build/common/installer/scripts/tomlparser-prom-customconfig.rb create mode 100644 
build/linux/installer/conf/prometheus-side-car.conf create mode 100644 build/linux/installer/conf/td-agent-bit-prom-side-car.conf create mode 100644 build/linux/installer/conf/telegraf-prom-side-car.conf create mode 100644 build/linux/installer/scripts/tomlparser-osm-config.rb delete mode 100644 build/linux/installer/scripts/tomlparser-prom-customconfig.rb create mode 100644 build/windows/installer/conf/telegraf.conf create mode 100644 kubernetes/container-azm-ms-osmconfig.yaml create mode 100644 kubernetes/linux/defaultpromenvvariables-sidecar create mode 100644 kubernetes/windows/setdefaulttelegrafenvvariables.ps1 diff --git a/README.md b/README.md index 3564345ee..555234c61 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,7 @@ The general directory structure is: │ │ ├── acrworkflows/ - acr work flows for the Linux Agent container image │ │ ├── defaultpromenvvariables - default environment variables for Prometheus scraping │ │ ├── defaultpromenvvariables-rs - cluster level default environment variables for Prometheus scraping +│ │ ├── defaultpromenvvariables-sidecar - cluster level default environment variables for Prometheus scraping in sidecar │ ├── windows/ - scripts to build the Docker image for Windows Agent │ │ ├── dockerbuild - script to build the code and docker imag, and publish docker image │ │ ├── acrworkflows/ - acr work flows for the Windows Agent container image diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 80d6f188d..01fb861cf 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -102,6 +102,114 @@ Note : The agent version(s) below has dates (ciprod), which indicate t - Telemetry fix for agent telemetry for sov. 
clouds +### 03/26/2021 - +##### Version microsoft/oms:ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021 (linux) +##### Version microsoft/oms:win-ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021 (windows) +##### Code change log +- Started collecting new metric - kubelet running pods count +- Onboarding script fixes to add explicit json output +- Proxy and token updates for ARC +- Doc updates for Microsoft charts repo release +- Bug fixes for trailing whitespaces in enable-monitoring.sh script +- Support for higher volume of prometheus metrics by scraping metrics from sidecar +- Update to get new version of telegraf - 1.18 +- Add label and field selectors for prometheus scraping using annotations +- Support for OSM integration +- Removed wireserver calls to get CA certs since access is removed +- Added liveness timeout for exec for linux containers + +### 02/23/2021 - +##### Version microsoft/oms:ciprod02232021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021 (linux) +##### Version microsoft/oms:win-ciprod02232021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod02232021 (windows) +##### Code change log +- ContainerLogV2 schema support for LogAnalytics & ADX (not usable externally yet) +- Fix nodemetrics (cpuusageprecentage & memoryusagepercentage) metrics not flowing. This is fixed upstream for k8s versions >= 1.19.7 and >=1.20.2. 
+- Fix cpu & memory usage exceeded threshold container metrics not flowing when requests and/or limits were not set +- Mute some unused exceptions from going to telemetry +- Collect containerimage (repository, image & imagetag) from spec (instead of runtime) +- Add support for extension MSI for k8s arc +- Use cloud specific instrumentation keys for telemetry +- Picked up newer version for apt +- Add priority class to daemonset (in our chart only) + +### 01/11/2021 - +##### Version microsoft/oms:ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021 (linux) +##### Version microsoft/oms:win-ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021 (windows) +##### Code change log +- Fixes for Linux Agent Replicaset Pod OOMing issue +- Update fluentbit (1.14.2 to 1.6.8) for the Linux Daemonset +- Make Fluentbit settings: log_flush_interval_secs, tail_buf_chunksize_megabytes and tail_buf_maxsize_megabytes configurable via configmap +- Support for PV inventory collection +- Removal of Custom metric region check for Public cloud regions and update to use cloud environment variable to determine the custom metric support +- For daemonset pods, add the dnsconfig to use ndots: 3 from ndots:5 to optimize the number of DNS API calls made +- Fix for inconsistency in the collection container environment variables for the pods which has high number of containers +- Fix for disabling of std{out;err} log_collection_settings via configmap issue in windows daemonset +- Update to use workspace key from mount file rather than environment variable for windows daemonset agent +- Remove per container info logs in the container inventory +- Enable ADX route for windows container logs +- Remove logging to termination log in windows agent liveness probe + +### 11/09/2020 - +##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020 (linux) +##### Version 
mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020 (windows) +##### Code change log +- Fix for duplicate windows metrics + +### 10/27/2020 - +##### Version microsoft/oms:ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020 (linux) +##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10272020 (windows) +##### Code change log +- Activate oneagent in few AKS regions (koreacentral,norwayeast) +- Disable syslog +- Fix timeout for Windows daemonset liveness probe +- Make request == limit for Windows daemonset resources (cpu & memory) +- Schema v2 for container log (ADX only - applicable only for select customers for piloting) + +### 10/05/2020 - +##### Version microsoft/oms:ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020 (linux) +##### Version microsoft/oms:win-ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) +##### Code change log +- Health CRD to version v1 (from v1beta1) for k8s versions >= 1.19.0 +- Collection of PV usage metrics for PVs mounted by pods (kube-system pods excluded by default)(doc-link-needed) +- Zero fill few custom metrics under a timer, also add zero filling for new PV usage metrics +- Collection of additional Kubelet metrics ('kubelet_running_pod_count','volume_manager_total_volumes','kubelet_node_config_error','process_resident_memory_bytes','process_cpu_seconds_total','kubelet_runtime_operations_total','kubelet_runtime_operations_errors_total'). This also includes updates to 'kubelet' workbook to include these new metrics +- Collection of Azure NPM (Network Policy Manager) metrics (basic & advanced. By default, NPM metrics collection is turned OFF)(doc-link-needed) +- Support log collection when docker root is changed with knode. 
Tracked by [this](https://github.com/Azure/AKS/issues/1373) issue +- Support for Pods in 'Terminating' state for nodelost scenarios +- Fix for reduction in telemetry for custom metrics ingestion failures +- Fix CPU capacity/limits metrics being 0 for Virtual nodes (VK) +- Add new custom metric regions (eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth) +- Enable strict SSL validation for AppInsights Ruby SDK +- Turn off custom metrics upload for unsupported cluster types +- Install CA certs from wire server for windows (in certain clouds) + +### 09/16/2020 - +> Note: This agent release targetted ONLY for non-AKS clusters via Azure Monitor for containers HELM chart update +##### Version microsoft/oms:ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020 (linux) +##### Version microsoft/oms:win-ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod09162020 (windows) +##### Code change log +- Collection of Azure Network Policy Manager Basic and Advanced metrics +- Add support in Windows Agent for Container log collection of CRI runtimes such as ContainerD +- Alertable metrics support Arc K8s cluster to parity with AKS +- Support for multiple container log mount paths when docker is updated through knode +- Bug fix related to MDM telemetry + +### 08/07/2020 - +##### Version microsoft/oms:ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020 (linux) +##### Version microsoft/oms:win-ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020 (windows) +##### Code change log +- Collection of KubeState metrics for deployments and HPA +- Add the Proxy support for Windows agent +- Fix for ContainerState in ContainerInventory to handle Failed state and collection of environment variables for terminated and failed containers +- Change /spec to /metrics/cadvisor endpoint to collect 
node capacity metrics +- Disable Health Plugin by default and can be enabled via configmap +- Pin version of jq to 1.5+dfsg-2 +- Bug fix for showing node as 'not ready' when there is disk pressure +- oneagent integration (disabled by default) +- Add region check before sending alertable metrics to MDM +- Telemetry fix for agent telemetry for sov. clouds + + ### 07/15/2020 - ##### Version microsoft/oms:ciprod07152020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07152020 (linux) ##### Version microsoft/oms:win-ciprod05262020-2 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod05262020-2 (windows) diff --git a/ReleaseProcess.md b/ReleaseProcess.md index 2a3e6001a..c6f51bb65 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -43,13 +43,47 @@ This needs to be co-ordinated with Red hat and ARO-RP team for the release and Make PR against [AKS-Engine](https://github.com/Azure/aks-engine). Refer PR https://github.com/Azure/aks-engine/pull/2318 -## ARO v4, On-prem K8s, Azure Arc K8s and OpenShift v4 clusters +## ARO v4, Azure Arc K8s and OpenShift v4 clusters Make sure azuremonitor-containers chart yamls updates with all changes going with the release and also make sure to bump the chart version, imagetag and docker provider version etc. Similar to agent container image, build pipeline automatically push the chart to container insights prod acr for canary and prod repos accordingly. Both the agent and helm chart will be replicated to `mcr.microsoft.com`. The way, customers will be onboard the monitoring to these clusters using onboarding scripts under `onboarding\managed` directory so please bump chart version for prod release. Once we move to Arc K8s Monitoring extension Public preview, these will be taken care so at that point of time no manual changes like this required. 
+## Microsoft Charts Repo release for On-prem K8s + +Since the HELM charts repo is being deprecated, the Microsoft charts repo is being used for the HELM chart release for on-prem K8s clusters. +To make chart release PR, fork [Microsoft-charts-repo](https://github.com/microsoft/charts/tree/gh-pages) and make the PR against `gh-pages` branch of the upstream repo. + +Refer PR - https://github.com/microsoft/charts/pull/23 for example. +Once the PR is merged, the latest version of the HELM chart should be available in a couple of minutes in https://microsoft.github.io/charts/repo and https://artifacthub.io/. + +Instructions to create PR +``` +# 1. create helm package for the release candidate + git clone git@github.com:microsoft/Docker-Provider.git + git checkout ci_prod + cd ~/Docker-Provider/charts/azuremonitor-containers # this path based on where you have cloned the repo + helm package . + +# 2. clone your fork repo and checkout gh-pages branch # gh-pages branch used as release branch + cd ~ + git clone + cd ~/charts # assumed the root dir of the clone is charts + git checkout gh-pages + +# 3. copy release candidate helm package + cd ~/charts/repo/azuremonitor-containers + # update chart version value with the version of chart being released + cp ~/Docker-Provider/charts/azuremonitor-containers/azuremonitor-containers-.tgz . + cd ~/charts/repo + # update repo index file + helm repo index . + +# 4. Review the changes and make PR. Please note, you may need to revert unrelated changes automatically added by `helm repo index .` command + +``` + # 4. Monitor agent roll-out status In Container Insights Agent (AKS) telemetry dashboard, update the agent roll status by region chart with released agent image and track rollout status. If you see any issues with agent rollout, reach out AKS on-call team for the help on investigation and understanding whats going on. 
diff --git a/build/common/installer/scripts/tomlparser-prom-customconfig.rb b/build/common/installer/scripts/tomlparser-prom-customconfig.rb new file mode 100644 index 000000000..819c1956f --- /dev/null +++ b/build/common/installer/scripts/tomlparser-prom-customconfig.rb @@ -0,0 +1,423 @@ +#!/usr/local/bin/ruby + +#this should be require relative in Linux and require in windows, since it is a gem install on windows +@os_type = ENV["OS_TYPE"] +if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + require "tomlrb" +else + require_relative "tomlrb" +end +# require_relative "tomlrb" +require_relative "ConfigParseErrorLogger" +require "fileutils" + +@promConfigMapMountPath = "/etc/config/settings/prometheus-data-collection-settings" +@replicaset = "replicaset" +@daemonset = "daemonset" +@promSideCar = "prometheussidecar" +@windows = "windows" +@configSchemaVersion = "" +@defaultDsInterval = "1m" +@defaultDsPromUrls = [] +@defaultDsFieldPass = [] +@defaultDsFieldDrop = [] +@defaultRsInterval = "1m" +@defaultRsPromUrls = [] +@defaultRsFieldPass = [] +@defaultRsFieldDrop = [] +@defaultRsK8sServices = [] +# @defaultRsMonitorPods = false +@defaultCustomPrometheusInterval = "1m" +@defaultCustomPrometheusFieldPass = [] +@defaultCustomPrometheusFieldDrop = [] +@defaultCustomPrometheusMonitorPods = false +@defaultCustomPrometheusLabelSelectors = "" +@defaultCustomPrometheusFieldSelectors = "" + +#Configurations to be used for the auto-generated input prometheus plugins for namespace filtering +@metricVersion = 2 +@monitorKubernetesPodsVersion = 2 +@urlTag = "scrapeUrl" +@bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" +@responseTimeout = "15s" +@tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +@insecureSkipVerify = true + +# Checking to see if this is the daemonset or replicaset to parse config accordingly +@controller = ENV["CONTROLLER_TYPE"] +@containerType = ENV["CONTAINER_TYPE"] +@sidecarScrapingEnabled = 
ENV["SIDECAR_SCRAPING_ENABLED"] + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@promConfigMapMountPath)) + puts "config::configmap container-azm-ms-agentconfig for settings mounted, parsing values for prometheus config map" + parsedConfig = Tomlrb.load_file(@promConfigMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted prometheus config map" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for settings not mounted, using defaults for prometheus scraping" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config map for prometheus config: #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +def checkForTypeArray(arrayValue, arrayType) + if (arrayValue.nil? || (arrayValue.kind_of?(Array) && ((arrayValue.length == 0) || (arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType))))) + return true + else + return false + end +end + +def checkForType(variable, varType) + if variable.nil? || variable.kind_of?(varType) + return true + else + return false + end +end + +def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + begin + puts "config::Starting to substitute the placeholders in telegraf conf copy file with no namespace filters" + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", ("pod_scrape_scope = \"#{(@controller.casecmp(@replicaset) == 0) ? 
"cluster" : "node"}\"")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", ("kubernetes_label_selector = \"#{kubernetesLabelSelectors}\"")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", ("kubernetes_field_selector = \"#{kubernetesFieldSelectors}\"")) + rescue => errorStr + puts "Exception while replacing default pod monitor settings for custom prometheus scraping: #{errorStr}" + end + return new_contents +end + +def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) + begin + puts "config::Starting to substitute the placeholders in telegraf conf copy file with namespace filters" + + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR") + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE") + + pluginConfigsWithNamespaces = "" + monitorKubernetesPodsNamespaces.each do |namespace| + if !namespace.nil? + #Stripping namespaces to remove leading and trailing whitespaces + namespace.strip! 
+ if namespace.length > 0 + pluginConfigsWithNamespaces += "\n[[inputs.prometheus]] + interval = \"#{interval}\" + monitor_kubernetes_pods = true + pod_scrape_scope = \"#{(@controller.casecmp(@replicaset) == 0) ? "cluster" : "node"}\" + monitor_kubernetes_pods_namespace = \"#{namespace}\" + kubernetes_label_selector = \"#{kubernetesLabelSelectors}\" + kubernetes_field_selector = \"#{kubernetesFieldSelectors}\" + fieldpass = #{fieldPassSetting} + fielddrop = #{fieldDropSetting} + metric_version = #{@metricVersion} + url_tag = \"#{@urlTag}\" + bearer_token = \"#{@bearerToken}\" + response_timeout = \"#{@responseTimeout}\" + tls_ca = \"#{@tlsCa}\" + insecure_skip_verify = #{@insecureSkipVerify}\n" + end + end + end + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces) + return new_contents + rescue => errorStr + puts "Exception while creating prometheus input plugins to filter namespaces for custom prometheus: #{errorStr}, using defaults" + replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + if !@controller.nil? + if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? + if @controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
+ #Get prometheus replicaset custom config settings + begin + interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] + urls = parsedConfig[:prometheus_data_collection_settings][:cluster][:urls] + kubernetesServices = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_services] + + # Remove below 4 lines after phased rollout + monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] + monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + kubernetesLabelSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_label_selector] + kubernetesFieldSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_field_selector] + + # Check for the right datatypes to enforce right setting values + if checkForType(interval, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + checkForTypeArray(kubernetesServices, String) && + checkForTypeArray(urls, String) && + # Remove below check after phased rollout + checkForType(kubernetesLabelSelectors, String) && + checkForType(kubernetesFieldSelectors, String) && + (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) # Checking for Boolean type, since 'Boolean' is not defined as a type in ruby + puts "config::Successfully passed typecheck for config settings for replicaset" + #if setting is nil assign default values + interval = (interval.nil?) ? @defaultRsInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultRsFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? 
@defaultRsFieldDrop : fieldDrop + kubernetesServices = (kubernetesServices.nil?) ? @defaultRsK8sServices : kubernetesServices + urls = (urls.nil?) ? @defaultRsPromUrls : urls + # Remove below lines after phased rollout + monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultRsMonitorPods : monitorKubernetesPods + kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? @defaultCustomPrometheusLabelSelectors : kubernetesLabelSelectors + kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? @defaultCustomPrometheusFieldSelectors : kubernetesFieldSelectors + + file_name = "/opt/telegraf-test-rs.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf", file_name) + + puts "config::Starting to substitute the placeholders in telegraf conf copy file for replicaset" + #Replace the placeholder config values with values from custom config + text = File.read(file_name) + new_contents = text.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", interval) + fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", fieldPassSetting) + fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", fieldDropSetting) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_K8S_SERVICES", ((kubernetesServices.length > 0) ? 
("[\"" + kubernetesServices.join("\",\"") + "\"]") : "[]")) + + # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces + # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - + # - to use defaults in case of nil settings + # Remove below block after phased rollout + if (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && (@sidecarScrapingEnabled.casecmp("false") == 0))) + monitorKubernetesPodsNSConfig = [] + if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? && checkForTypeArray(monitorKubernetesPodsNamespaces, String) + # Adding a check to see if an empty array is passed for kubernetes namespaces + if (monitorKubernetesPodsNamespaces.length > 0) + new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + monitorKubernetesPodsNSConfig = monitorKubernetesPodsNamespaces + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + # Label and field selectors are passed as strings. For field selectors, split by commas to get the number of key-value pairs. + # Label selectors can be formatted as "app in (app1, app2, app3)", so split by commas only outside parentheses to get the number of key-value pairs. 
+ kubernetesLabelSelectorsLength = kubernetesLabelSelectors.split(/,\s*(?=[^()]*(?:\(|$))/).length + kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(",").length + end + + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully substituted the placeholders in telegraf conf file for replicaset" + #Set environment variables for telemetry + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil? + file.write("export TELEMETRY_RS_PROM_INTERVAL=\"#{interval}\"\n") + #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_RS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") + file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") + file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") + file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") + # Remove below block after phased rollout + if (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? 
&& (@sidecarScrapingEnabled.casecmp("false") == 0))) + file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + file.write("export TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") + file.write("export TELEMETRY_RS_PROM_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") + end + + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for replicaset" + end + else + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for replicaset, using defaults, please use right types for all settings") + end # end of type check condition + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for replicaset: #{errorStr}, using defaults") + setRsPromDefaults + puts "****************End Prometheus Config Processing********************" + end + elsif @controller.casecmp(@daemonset) == 0 && + ((!@containerType.nil? && @containerType.casecmp(@promSideCar) == 0) || + (!@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0) && @sidecarScrapingEnabled.strip.casecmp("true") == 0) && + !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
+ #Get prometheus custom config settings for monitor kubernetes pods + begin + interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] + monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] + monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] + kubernetesLabelSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_label_selector] + kubernetesFieldSelectors = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_field_selector] + + # Check for the right datattypes to enforce right setting values + if checkForType(interval, String) && + checkForType(kubernetesLabelSelectors, String) && + checkForType(kubernetesFieldSelectors, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby + puts "config::Successfully passed typecheck for config settings for custom prometheus scraping" + #if setting is nil assign default values + interval = (interval.nil?) ? @defaultCustomPrometheusInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultCustomPrometheusFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? @defaultCustomPrometheusFieldDrop : fieldDrop + monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultCustomPrometheusMonitorPods : monitorKubernetesPods + kubernetesLabelSelectors = (kubernetesLabelSelectors.nil?) ? @defaultCustomPrometheusLabelSelectors : kubernetesLabelSelectors + kubernetesFieldSelectors = (kubernetesFieldSelectors.nil?) ? 
@defaultCustomPrometheusFieldSelectors : kubernetesFieldSelectors + + if !@os_type.nil? && !@os_type.empty? && @os_type.strip.casecmp("windows") == 0 + file_name = "/etc/telegraf/telegraf.conf" + else + file_name = "/opt/telegraf-test-prom-side-car.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf", file_name) + end + puts "config::Starting to substitute the placeholders in telegraf conf copy file for linux or conf file for windows for custom prometheus scraping" + #Replace the placeholder config values with values from custom config + text = File.read(file_name) + new_contents = text.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", interval) + fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", fieldPassSetting) + fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" + new_contents = new_contents.gsub("$AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", fieldDropSetting) + + # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces + # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - + # - to use defaults in case of nil settings + monitorKubernetesPodsNSConfig = [] + if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? 
&& checkForTypeArray(monitorKubernetesPodsNamespaces, String) + # Adding a check to see if an empty array is passed for kubernetes namespaces + if (monitorKubernetesPodsNamespaces.length > 0) + new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length + monitorKubernetesPodsNSConfig = monitorKubernetesPodsNamespaces + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + else + new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods, kubernetesLabelSelectors, kubernetesFieldSelectors) + monitorKubernetesPodsNamespacesLength = 0 + end + + # Label and field selectors are passed as strings. For field selectors, split by commas to get the number of key-value pairs. + # Label selectors can be formatted as "app in (app1, app2, app3)", so split by commas only outside parentheses to get the number of key-value pairs. + kubernetesLabelSelectorsLength = kubernetesLabelSelectors.split(/,\s*(?=[^()]*(?:\(|$))/).length + kubernetesFieldSelectorsLength = kubernetesFieldSelectors.split(",").length + + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully substituted the placeholders in telegraf conf file for custom prometheus scraping" + #Set environment variables for telemetry in the sidecar container + if (!@containerType.nil? && @containerType.casecmp(@promSideCar) == 0) + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil? 
+ #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_CUSTOM_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_LABEL_SELECTOR_LENGTH=\"#{kubernetesLabelSelectorsLength}\"\n") + file.write("export TELEMETRY_CUSTOM_PROM_FIELD_SELECTOR_LENGTH=\"#{kubernetesFieldSelectorsLength}\"\n") + + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for prometheus sidecar" + end + end + else + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for prometheus side car, using defaults, please use right types for all settings") + end # end of type check condition + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for promethues side car: #{errorStr}, using defaults") + puts "****************End Prometheus Config Processing********************" + end + elsif @controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? + #Get prometheus daemonset custom config settings + begin + interval = parsedConfig[:prometheus_data_collection_settings][:node][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:node][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:node][:fielddrop] + urls = parsedConfig[:prometheus_data_collection_settings][:node][:urls] + + # Check for the right datattypes to enforce right setting values + if checkForType(interval, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + checkForTypeArray(urls, String) + puts "config::Successfully passed typecheck for config settings for daemonset" + + #if setting is nil assign default values + interval = (interval.nil?) ? 
@defaultDsInterval : interval + fieldPass = (fieldPass.nil?) ? @defaultDsFieldPass : fieldPass + fieldDrop = (fieldDrop.nil?) ? @defaultDsFieldDrop : fieldDrop + urls = (urls.nil?) ? @defaultDsPromUrls : urls + + file_name = "/opt/telegraf-test.conf" + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf.conf", file_name) + + puts "config::Starting to substitute the placeholders in telegraf conf copy file for daemonset" + #Replace the placeholder config values with values from custom config + text = File.read(file_name) + new_contents = text.gsub("$AZMON_DS_PROM_INTERVAL", interval) + new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDPASS", ((fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDDROP", ((fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]")) + new_contents = new_contents.gsub("$AZMON_DS_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully substituted the placeholders in telegraf conf file for daemonset" + + #Set environment variables for telemetry + file = File.open("telemetry_prom_config_env_var", "w") + if !file.nil? 
+ file.write("export TELEMETRY_DS_PROM_INTERVAL=\"#{interval}\"\n") + #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_DS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") + file.write("export TELEMETRY_DS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") + file.write("export TELEMETRY_DS_PROM_URLS_LENGTH=#{urls.length}\n") + # Close file after writing all environment variables + file.close + puts "config::Successfully created telemetry file for daemonset" + end + else + ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for daemonset, using defaults, please use right types for all settings") + end # end of type check condition + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for daemonset: #{errorStr}, using defaults, please check correctness of configmap") + puts "****************End Prometheus Config Processing********************" + end + end # end of controller type check + end + else + ConfigParseErrorLogger.logError("Controller undefined while processing prometheus config, using defaults") + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Prometheus Config Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? 
+ populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@promConfigMapMountPath)) + ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported version") + else + puts "config::No configmap mounted for prometheus custom config, using defaults" + end +end +puts "****************End Prometheus Config Processing********************" diff --git a/build/linux/installer/conf/prometheus-side-car.conf b/build/linux/installer/conf/prometheus-side-car.conf new file mode 100644 index 000000000..fd40910d9 --- /dev/null +++ b/build/linux/installer/conf/prometheus-side-car.conf @@ -0,0 +1,4 @@ + + + + diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf new file mode 100644 index 000000000..720f54820 --- /dev/null +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -0,0 +1,28 @@ +[SERVICE] + #Default service flush interval is 15 seconds + Flush 15 + HTTP_Server Off + Daemon Off + storage.path /var/opt/microsoft/docker-cimprov/state/flbstore/ + storage.sync normal + storage.checksum off + storage.backlog.mem_limit 10M + Log_Level info + Parsers_File /etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf + Log_File /var/opt/microsoft/docker-cimprov/log/fluent-bit.log + +[INPUT] + Name tcp + Tag oms.container.perf.telegraf.* + Listen 0.0.0.0 + Port 25229 + Chunk_Size 1m + Buffer_Size 1m + Mem_Buf_Limit 20m + +[OUTPUT] + Name oms + EnableTelemetry true + Retry_Limit 10 + TelemetryPushIntervalSeconds 300 + Match oms.container.* \ No newline at end of file diff --git a/build/linux/installer/conf/telegraf-prom-side-car.conf b/build/linux/installer/conf/telegraf-prom-side-car.conf new file mode 100644 index 000000000..b3b4ba1d3 --- /dev/null +++ b/build/linux/installer/conf/telegraf-prom-side-car.conf @@ -0,0 +1,162 @@ +# Telegraf Configuration +# +# Telegraf is 
entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + + +# Global tags can be specified here in key="value" format. +[global_tags] + hostName = "placeholder_hostname" + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "60s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 3000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). + metric_buffer_limit = 60000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. 
Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "15s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. + debug = false + ## Run telegraf in quiet mode (error log messages only). + quiet = true + ## Specify the log file name. The empty string means to log to stderr. + logfile = "" + ## Override default hostname, if empty use os.Hostname() + #hostname = "placeholder_hostname" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = true + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +# Generic socket writer capable of handling multiple socket types. 
+[[outputs.socket_writer]] + ## URL to connect to + address = "tcp://0.0.0.0:25229" + # address = "tcp://example.com:http" + # address = "tcp4://127.0.0.1:8094" + # address = "tcp6://127.0.0.1:8094" + # address = "tcp6://[2001:db8::1]:8094" + # address = "udp://127.0.0.1:8094" + # address = "udp4://127.0.0.1:8094" + # address = "udp6://127.0.0.1:8094" + # address = "unix:///tmp/telegraf.sock" + # address = "unixgram:///tmp/telegraf.sock" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. + ## Only applies to TCP sockets. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" + + ## Data format to generate. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "json" + namedrop = ["agent_telemetry"] + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + +[[processors.converter]] + [processors.converter.fields] + float = ["*"] + +#Prometheus Custom Metrics +[[inputs.prometheus]] + interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. 
+ ## - prometheus.io/port: If port is not 9102 use this annotation + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS + $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE + + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR + + fieldpass = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS + fielddrop = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP + + metric_version = 2 + url_tag = "scrapeUrl" + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## OR + # bearer_token_string = "abc_123" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + ## Use TLS but skip chain & host verification + insecure_skip_verify = true + +$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER + +## OSM Prometheus configuration +$AZMON_TELEGRAF_OSM_PROM_PLUGINS diff --git a/build/linux/installer/conf/telegraf-rs.conf b/build/linux/installer/conf/telegraf-rs.conf index d81196330..ee1cf8819 100644 --- a/build/linux/installer/conf/telegraf-rs.conf +++ b/build/linux/installer/conf/telegraf-rs.conf @@ -540,13 +540,13 @@ #Prometheus Custom Metrics [[inputs.prometheus]] - interval = "$AZMON_RS_PROM_INTERVAL" + interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" ## An array of urls to scrape metrics from. - urls = $AZMON_RS_PROM_URLS + urls = $AZMON_TELEGRAF_CUSTOM_PROM_URLS ## An array of Kubernetes services to scrape metrics from. 
- kubernetes_services = $AZMON_RS_PROM_K8S_SERVICES + kubernetes_services = $AZMON_TELEGRAF_CUSTOM_PROM_K8S_SERVICES ## Scrape Kubernetes pods for the following prometheus annotations: ## - prometheus.io/scrape: Enable scraping for this pod @@ -554,10 +554,15 @@ ## set this to `https` & most likely set the tls config. ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. ## - prometheus.io/port: If port is not 9102 use this annotation - $AZMON_RS_PROM_MONITOR_PODS + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS - fieldpass = $AZMON_RS_PROM_FIELDPASS - fielddrop = $AZMON_RS_PROM_FIELDDROP + $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE + + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR + + fieldpass = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS + fielddrop = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP metric_version = 2 url_tag = "scrapeUrl" @@ -581,7 +586,11 @@ insecure_skip_verify = true #tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"] -$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER +$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER + +## OSM Prometheus configuration +$AZMON_TELEGRAF_OSM_PROM_PLUGINS + # [[inputs.exec]] # ## Commands array # interval = "15m" diff --git a/build/linux/installer/conf/telegraf.conf b/build/linux/installer/conf/telegraf.conf index 202ac9741..5a5bb2d8c 100644 --- a/build/linux/installer/conf/telegraf.conf +++ b/build/linux/installer/conf/telegraf.conf @@ -675,7 +675,9 @@ ## An array of urls to scrape metrics from. 
urls = ["$CADVISOR_METRICS_URL"] - fieldpass = ["kubelet_running_pod_count","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] + # <= 1.18: metric name is kubelet_running_pod_count + # >= 1.19: metric name changed to kubelet_running_pods + fieldpass = ["kubelet_running_pod_count","kubelet_running_pods","volume_manager_total_volumes", "kubelet_node_config_error", "process_resident_memory_bytes", "process_cpu_seconds_total"] metric_version = 2 url_tag = "scrapeUrl" diff --git a/build/linux/installer/datafiles/base_container.data b/build/linux/installer/datafiles/base_container.data index c680f0eea..df8fbc3da 100644 --- a/build/linux/installer/datafiles/base_container.data +++ b/build/linux/installer/datafiles/base_container.data @@ -110,24 +110,28 @@ MAINTAINER: 'Microsoft Corporation' /opt/tomlrb/string_utils.rb; source/toml-parser/tomlrb/string_utils.rb; 644; root; root /opt/tomlrb/version.rb; source/toml-parser/tomlrb/version.rb; 644; root; root -/opt/td-agent-bit/bin/out_oms.so; intermediate/${{BUILD_CONFIGURATION}}/out_oms.so; 755; root; root -/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; build/linux/installer/conf/td-agent-bit.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf; build/linux/installer/conf/td-agent-bit-rs.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf; build/linux/installer/conf/azm-containers-parser.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/out_oms.conf; build/linux/installer/conf/out_oms.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/telegraf.conf; build/linux/installer/conf/telegraf.conf; 644; root; root -/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf; build/linux/installer/conf/telegraf-rs.conf; 644; root; root -/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; build/linux/installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root -/opt/livenessprobe.sh; 
build/linux/installer/scripts/livenessprobe.sh; 755; root; root -/opt/tomlparser-prom-customconfig.rb; build/linux/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root -/opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root -/opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root +/opt/td-agent-bit/bin/out_oms.so; intermediate/${{BUILD_CONFIGURATION}}/out_oms.so; 755; root; root +/etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf; build/linux/installer/conf/prometheus-side-car.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; build/linux/installer/conf/td-agent-bit.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf; build/linux/installer/conf/td-agent-bit-prom-side-car.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf; build/linux/installer/conf/td-agent-bit-rs.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf; build/linux/installer/conf/azm-containers-parser.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/out_oms.conf; build/linux/installer/conf/out_oms.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/telegraf.conf; build/linux/installer/conf/telegraf.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf; build/linux/installer/conf/telegraf-prom-side-car.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf; build/linux/installer/conf/telegraf-rs.conf; 644; root; root +/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; build/linux/installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root +/opt/livenessprobe.sh; build/linux/installer/scripts/livenessprobe.sh; 755; root; root +/opt/tomlparser-prom-customconfig.rb; build/common/installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root 
+/opt/tomlparser-mdm-metrics-config.rb; build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb; 755; root; root +/opt/tomlparser-metric-collection-config.rb; build/linux/installer/scripts/tomlparser-metric-collection-config.rb; 755; root; root /opt/tomlparser-agent-config.rb; build/linux/installer/scripts/tomlparser-agent-config.rb; 755; root; root /opt/tomlparser.rb; build/common/installer/scripts/tomlparser.rb; 755; root; root /opt/td-agent-bit-conf-customizer.rb; build/common/installer/scripts/td-agent-bit-conf-customizer.rb; 755; root; root /opt/ConfigParseErrorLogger.rb; build/common/installer/scripts/ConfigParseErrorLogger.rb; 755; root; root /opt/tomlparser-npm-config.rb; build/linux/installer/scripts/tomlparser-npm-config.rb; 755; root; root +/opt/tomlparser-osm-config.rb; build/linux/installer/scripts/tomlparser-osm-config.rb; 755; root; root /opt/microsoft/omsagent/plugin/filter_cadvisor_health_container.rb; source/plugins/ruby/filter_cadvisor_health_container.rb; 644; root; root diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index e3f9fb475..a82fa28eb 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -26,15 +26,22 @@ then exit 1 fi -if [ ! -s "inotifyoutput.txt" ] +if [ -s "inotifyoutput.txt" ] then - # inotifyoutput file is empty and the grep commands for omsagent and td-agent-bit succeeded - exit 0 -else - if [ -s "inotifyoutput.txt" ] - then - # inotifyoutput file has data(config map was applied) - echo "inotifyoutput.txt has been updated - config changed" > /dev/termination-log - exit 1 - fi + # inotifyoutput file has data(config map was applied) + echo "inotifyoutput.txt has been updated - config changed" > /dev/termination-log + exit 1 fi + +# Perform the following check only for prometheus sidecar that does OSM scraping or for replicaset when sidecar scraping is disabled +if [[ ( ( ! 
-e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( ( ! -z "${SIDECAR_SCRAPING_ENABLED}" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ) ]]; then + if [ -s "inotifyoutput-osm.txt" ] + then + # inotifyoutput-osm file has data(config map was applied) + echo "inotifyoutput-osm.txt has been updated - config changed" > /dev/termination-log + exit 1 + fi +fi + +exit 0 diff --git a/build/linux/installer/scripts/tomlparser-osm-config.rb b/build/linux/installer/scripts/tomlparser-osm-config.rb new file mode 100644 index 000000000..096064db8 --- /dev/null +++ b/build/linux/installer/scripts/tomlparser-osm-config.rb @@ -0,0 +1,168 @@ +#!/usr/local/bin/ruby + +require_relative "tomlrb" +require "fileutils" +require_relative "ConfigParseErrorLogger" + +@controllerType = ENV["CONTROLLER_TYPE"] +@containerType = ENV["CONTAINER_TYPE"] +@sidecarScrapingEnabled = ENV["SIDECAR_SCRAPING_ENABLED"] + +@replicaset = "replicaset" +@prometheusSidecar = "prometheussidecar" + +if !@controllerType.nil? && !@controllerType.empty? && @controllerType.strip.casecmp(@replicaset) == 0 && + (@sidecarScrapingEnabled.nil? || (!@sidecarScrapingEnabled.nil? && !@sidecarScrapingEnabled.empty? && @sidecarScrapingEnabled.strip.casecmp("false") == 0)) + @tgfConfigFile = "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + @tgfTestConfigFile = "/opt/telegraf-test-rs.conf" +elsif !@containerType.nil? && !@containerType.empty? 
&& @containerType.strip.casecmp(@prometheusSidecar) == 0 + @tgfConfigFile = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + @tgfTestConfigFile = "/opt/telegraf-test-prom-side-car.conf" +end + +@configMapMountPath = "/etc/config/osm-settings/osm-metric-collection-configuration" +@configSchemaVersion = "" +# @tgfConfigFileSidecar = "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" +# @tgfTestConfigFile = "/opt/telegraf-test-prom-side-car.conf" +@osmMetricNamespaces = [] + +#Configurations to be used for the auto-generated input prometheus plugins for namespace filtering +@metricVersion = 2 +@monitorKubernetesPodsVersion = 2 +#@fieldPassSetting = "[\"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq\"]" +@fieldPassSetting = "[\"envoy_cluster_upstream_cx_total\", \"envoy_cluster_upstream_cx_connect_fail\", \"envoy_cluster_upstream_rq\", \"envoy_cluster_upstream_rq_xx\", \"envoy_cluster_upstream_rq_total\", \"envoy_cluster_upstream_rq_time_bucket\", \"envoy_cluster_upstream_cx_rx_bytes_total\", \"envoy_cluster_upstream_cx_tx_bytes_total\", \"envoy_cluster_upstream_cx_active\"]" +@scrapeInterval = "1m" +@urlTag = "scrapeUrl" +@bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" +@responseTimeout = "15s" +@tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" +@insecureSkipVerify = true + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is created + if (File.file?(@configMapMountPath)) + puts "config::configmap container-azm-ms-osmconfig for osm metrics found, parsing values" + parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted config map for osm metrics" + return parsedConfig + else + puts "config::configmap container-azm-ms-osmconfig for osm metrics not mounted, using defaults" + return nil + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception 
while parsing config map for osm metrics: #{errorStr}, using defaults, please check config map for errors") + return nil + end +end + +def checkForTypeArray(arrayValue, arrayType) + if (arrayValue.nil? || (arrayValue.kind_of?(Array) && ((arrayValue.length == 0) || (arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType))))) + return true + else + return false + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + begin + if !parsedConfig.nil? && + !parsedConfig[:osm_metric_collection_configuration].nil? && + !parsedConfig[:osm_metric_collection_configuration][:settings].nil? + osmPromMetricNamespaces = parsedConfig[:osm_metric_collection_configuration][:settings][:monitor_namespaces] + puts "config::osm::got:osm_metric_collection_configuration.settings.monitor_namespaces='#{osmPromMetricNamespaces}'" + + # Check to see if osm_metric_collection_configuration.settings has a valid setting for monitor_namespaces to enable scraping for specific namespaces + # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - + # - to use defaults in case of nil settings + if !osmPromMetricNamespaces.nil? 
&& checkForTypeArray(osmPromMetricNamespaces, String) + # Adding a check to see if an empty array is passed for kubernetes namespaces + if (osmPromMetricNamespaces.length > 0) + @osmMetricNamespaces = osmPromMetricNamespaces + end + end + end + rescue => errorStr + puts "config::osm::error:Exception while reading config settings for osm configuration settings - #{errorStr}, using defaults" + @osmMetricNamespaces = [] + end +end + +def replaceOsmTelegrafConfigPlaceHolders + begin + #replace place holders in configuration file + tgfConfig = File.read(@tgfTestConfigFile) #read returns only after closing the file + + if @osmMetricNamespaces.length > 0 + osmPluginConfigsWithNamespaces = "" + @osmMetricNamespaces.each do |namespace| + if !namespace.nil? + #Stripping namespaces to remove leading and trailing whitespaces + namespace.strip! + if namespace.length > 0 + osmPluginConfigsWithNamespaces += "\n[[inputs.prometheus]] + name_prefix=\"container.azm.ms.osm/\" + interval = \"#{@scrapeInterval}\" + monitor_kubernetes_pods = true + pod_scrape_scope = \"#{(@controllerType.casecmp(@replicaset) == 0) ? 
"cluster" : "node"}\" + monitor_kubernetes_pods_namespace = \"#{namespace}\" + fieldpass = #{@fieldPassSetting} + metric_version = #{@metricVersion} + url_tag = \"#{@urlTag}\" + bearer_token = \"#{@bearerToken}\" + response_timeout = \"#{@responseTimeout}\" + tls_ca = \"#{@tlsCa}\" + insecure_skip_verify = #{@insecureSkipVerify}\n" + end + end + end + tgfConfig = tgfConfig.gsub("$AZMON_TELEGRAF_OSM_PROM_PLUGINS", osmPluginConfigsWithNamespaces) + else + puts "Using defaults for OSM configuration since there was an error in OSM config map or no namespaces were set" + tgfConfig = tgfConfig.gsub("$AZMON_TELEGRAF_OSM_PROM_PLUGINS", "") + end + File.open(@tgfTestConfigFile, "w") { |file| file.puts tgfConfig } # 'file' will be closed here after it goes out of scope + puts "config::osm::Successfully substituted the OSM placeholders in #{@tgfTestConfigFile} file in sidecar container" + rescue => errorStr + # TODO: test this scenario out + puts "config::osm::error:Exception while replacing telegraf configuration settings for osm - #{errorStr}, using defaults" + end +end + +@osmConfigSchemaVersion = ENV["AZMON_OSM_CFG_SCHEMA_VERSION"] +puts "****************Start OSM Config Processing********************" +if !@osmConfigSchemaVersion.nil? && !@osmConfigSchemaVersion.empty? && @osmConfigSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + # Check to see if the prometheus custom config parser has created a test config file so that we can replace the settings in the test file and run it, If not create + # a test config file by copying contents of the actual telegraf config file. 
+ if (!File.exist?(@tgfTestConfigFile)) + # Copy the telegraf config file to a temp file to run telegraf in test mode with this config + puts "test telegraf config file #{@tgfTestConfigFile} does not exist, creating new one" + FileUtils.cp(@tgfConfigFile, @tgfTestConfigFile) + end + + replaceOsmTelegrafConfigPlaceHolders() + + # Write the telemetry to file, so that they can be set as environment variables + telemetryFile = File.open("integration_osm_config_env_var", "w") + + if !telemetryFile.nil? + telemetryFile.write("export TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT=#{@osmMetricNamespaces.length}\n") + # Close file after writing all environment variables + telemetryFile.close + else + puts "config::osm::Exception while opening file for writing OSM telemetry environment variables" + end + end +else + if (File.file?(@configMapMountPath)) + ConfigParseErrorLogger.logError("config::osm::unsupported/missing config schema version - '#{@osmConfigSchemaVersion}' , using defaults, please use supported schema version") + else + puts "config::No configmap mounted for OSM config, using defaults" + end +end +puts "****************End OSM Config Processing********************" diff --git a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb b/build/linux/installer/scripts/tomlparser-prom-customconfig.rb deleted file mode 100644 index 7aad580ee..000000000 --- a/build/linux/installer/scripts/tomlparser-prom-customconfig.rb +++ /dev/null @@ -1,267 +0,0 @@ -#!/usr/local/bin/ruby - -require_relative "tomlrb" -require_relative "ConfigParseErrorLogger" -require "fileutils" - -@promConfigMapMountPath = "/etc/config/settings/prometheus-data-collection-settings" -@replicaset = "replicaset" -@daemonset = "daemonset" -@configSchemaVersion = "" -@defaultDsInterval = "1m" -@defaultDsPromUrls = [] -@defaultDsFieldPass = [] -@defaultDsFieldDrop = [] -@defaultRsInterval = "1m" -@defaultRsPromUrls = [] -@defaultRsFieldPass = [] -@defaultRsFieldDrop = [] -@defaultRsK8sServices = [] 
-@defaultRsMonitorPods = false - -#Configurations to be used for the auto-generated input prometheus plugins for namespace filtering -@metricVersion = 2 -@urlTag = "scrapeUrl" -@bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token" -@responseTimeout = "15s" -@tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" -@insecureSkipVerify = true - -# Use parser to parse the configmap toml file to a ruby structure -def parseConfigMap - begin - # Check to see if config map is created - if (File.file?(@promConfigMapMountPath)) - puts "config::configmap container-azm-ms-agentconfig for settings mounted, parsing values for prometheus config map" - parsedConfig = Tomlrb.load_file(@promConfigMapMountPath, symbolize_keys: true) - puts "config::Successfully parsed mounted prometheus config map" - return parsedConfig - else - puts "config::configmap container-azm-ms-agentconfig for settings not mounted, using defaults for prometheus scraping" - return nil - end - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config map for prometheus config: #{errorStr}, using defaults, please check config map for errors") - return nil - end -end - -def checkForTypeArray(arrayValue, arrayType) - if (arrayValue.nil? || (arrayValue.kind_of?(Array) && ((arrayValue.length == 0) || (arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType))))) - return true - else - return false - end -end - -def checkForType(variable, varType) - if variable.nil? 
|| variable.kind_of?(varType) - return true - else - return false - end -end - -def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) - begin - new_contents = new_contents.gsub("$AZMON_RS_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}")) - new_contents = new_contents.gsub("$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "") - rescue => errorStr - puts "Exception while replacing default pod monitor settings: #{errorStr}" - end - return new_contents -end - -def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) - begin - new_contents = new_contents.gsub("$AZMON_RS_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_RS_PROM_MONITOR_PODS") - pluginConfigsWithNamespaces = "" - monitorKubernetesPodsNamespaces.each do |namespace| - if !namespace.nil? - #Stripping namespaces to remove leading and trailing whitespaces - namespace.strip! 
- if namespace.length > 0 - pluginConfigsWithNamespaces += "\n[[inputs.prometheus]] - interval = \"#{interval}\" - monitor_kubernetes_pods = true - monitor_kubernetes_pods_namespace = \"#{namespace}\" - fieldpass = #{fieldPassSetting} - fielddrop = #{fieldDropSetting} - metric_version = #{@metricVersion} - url_tag = \"#{@urlTag}\" - bearer_token = \"#{@bearerToken}\" - response_timeout = \"#{@responseTimeout}\" - tls_ca = \"#{@tlsCa}\" - insecure_skip_verify = #{@insecureSkipVerify}\n" - end - end - end - new_contents = new_contents.gsub("$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces) - return new_contents - rescue => errorStr - puts "Exception while creating prometheus input plugins to filter namespaces: #{errorStr}, using defaults" - replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) - end -end - -# Use the ruby structure created after config parsing to set the right values to be used as environment variables -def populateSettingValuesFromConfigMap(parsedConfig) - # Checking to see if this is the daemonset or replicaset to parse config accordingly - controller = ENV["CONTROLLER_TYPE"] - if !controller.nil? - if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? - if controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
- #Get prometheus replicaset custom config settings - begin - interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] - fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] - fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] - urls = parsedConfig[:prometheus_data_collection_settings][:cluster][:urls] - kubernetesServices = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_services] - monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] - monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces] - - # Check for the right datattypes to enforce right setting values - if checkForType(interval, String) && - checkForTypeArray(fieldPass, String) && - checkForTypeArray(fieldDrop, String) && - checkForTypeArray(kubernetesServices, String) && - checkForTypeArray(urls, String) && - (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby - puts "config::Successfully passed typecheck for config settings for replicaset" - #if setting is nil assign default values - interval = (interval.nil?) ? @defaultRsInterval : interval - fieldPass = (fieldPass.nil?) ? @defaultRsFieldPass : fieldPass - fieldDrop = (fieldDrop.nil?) ? @defaultRsFieldDrop : fieldDrop - kubernetesServices = (kubernetesServices.nil?) ? @defaultRsK8sServices : kubernetesServices - urls = (urls.nil?) ? @defaultRsPromUrls : urls - monitorKubernetesPods = (monitorKubernetesPods.nil?) ? 
@defaultRsMonitorPods : monitorKubernetesPods - - file_name = "/opt/telegraf-test-rs.conf" - # Copy the telegraf config file to a temp file to run telegraf in test mode with this config - FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf", file_name) - - puts "config::Starting to substitute the placeholders in telegraf conf copy file for replicaset" - #Replace the placeholder config values with values from custom config - text = File.read(file_name) - new_contents = text.gsub("$AZMON_RS_PROM_INTERVAL", interval) - fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDPASS", fieldPassSetting) - fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]" - new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDDROP", fieldDropSetting) - new_contents = new_contents.gsub("$AZMON_RS_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]")) - new_contents = new_contents.gsub("$AZMON_RS_PROM_K8S_SERVICES", ((kubernetesServices.length > 0) ? ("[\"" + kubernetesServices.join("\",\"") + "\"]") : "[]")) - - # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces - # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accomodate for other settings to be able - - # - to use defaults in case of nil settings - if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? 
&& checkForTypeArray(monitorKubernetesPodsNamespaces, String) - new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting) - monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length - else - new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods) - monitorKubernetesPodsNamespacesLength = 0 - end - - File.open(file_name, "w") { |file| file.puts new_contents } - puts "config::Successfully substituted the placeholders in telegraf conf file for replicaset" - #Set environment variables for telemetry - file = File.open("telemetry_prom_config_env_var", "w") - if !file.nil? - file.write("export TELEMETRY_RS_PROM_INTERVAL=\"#{interval}\"\n") - #Setting array lengths as environment variables for telemetry purposes - file.write("export TELEMETRY_RS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") - file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") - file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") - file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") - file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") - file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n") - - # Close file after writing all environment variables - file.close - puts "config::Successfully created telemetry file for replicaset" - end - else - ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for replicaset, using defaults, please use right types for all settings") - end # end of type check condition - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for replicaset: #{errorStr}, using defaults") - setRsPromDefaults - puts "****************End Prometheus Config 
Processing********************" - end - elsif controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? - #Get prometheus daemonset custom config settings - begin - interval = parsedConfig[:prometheus_data_collection_settings][:node][:interval] - fieldPass = parsedConfig[:prometheus_data_collection_settings][:node][:fieldpass] - fieldDrop = parsedConfig[:prometheus_data_collection_settings][:node][:fielddrop] - urls = parsedConfig[:prometheus_data_collection_settings][:node][:urls] - - # Check for the right datattypes to enforce right setting values - if checkForType(interval, String) && - checkForTypeArray(fieldPass, String) && - checkForTypeArray(fieldDrop, String) && - checkForTypeArray(urls, String) - puts "config::Successfully passed typecheck for config settings for daemonset" - - #if setting is nil assign default values - interval = (interval.nil?) ? @defaultDsInterval : interval - fieldPass = (fieldPass.nil?) ? @defaultDsFieldPass : fieldPass - fieldDrop = (fieldDrop.nil?) ? @defaultDsFieldDrop : fieldDrop - urls = (urls.nil?) ? @defaultDsPromUrls : urls - - file_name = "/opt/telegraf-test.conf" - # Copy the telegraf config file to a temp file to run telegraf in test mode with this config - FileUtils.cp("/etc/opt/microsoft/docker-cimprov/telegraf.conf", file_name) - - puts "config::Starting to substitute the placeholders in telegraf conf copy file for daemonset" - #Replace the placeholder config values with values from custom config - text = File.read(file_name) - new_contents = text.gsub("$AZMON_DS_PROM_INTERVAL", interval) - new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDPASS", ((fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]")) - new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDDROP", ((fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]")) - new_contents = new_contents.gsub("$AZMON_DS_PROM_URLS", ((urls.length > 0) ? 
("[\"" + urls.join("\",\"") + "\"]") : "[]")) - File.open(file_name, "w") { |file| file.puts new_contents } - puts "config::Successfully substituted the placeholders in telegraf conf file for daemonset" - - #Set environment variables for telemetry - file = File.open("telemetry_prom_config_env_var", "w") - if !file.nil? - file.write("export TELEMETRY_DS_PROM_INTERVAL=\"#{interval}\"\n") - #Setting array lengths as environment variables for telemetry purposes - file.write("export TELEMETRY_DS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") - file.write("export TELEMETRY_DS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") - file.write("export TELEMETRY_DS_PROM_URLS_LENGTH=#{urls.length}\n") - # Close file after writing all environment variables - file.close - puts "config::Successfully created telemetry file for daemonset" - end - else - ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for daemonset, using defaults, please use right types for all settings") - end # end of type check condition - rescue => errorStr - ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for daemonset: #{errorStr}, using defaults, please check correctness of configmap") - puts "****************End Prometheus Config Processing********************" - end - end # end of controller type check - end - else - ConfigParseErrorLogger.logError("Controller undefined while processing prometheus config, using defaults") - end -end - -@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] -puts "****************Start Prometheus Config Processing********************" -if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it - configMapSettings = parseConfigMap - if !configMapSettings.nil? 
- populateSettingValuesFromConfigMap(configMapSettings) - end -else - if (File.file?(@promConfigMapMountPath)) - ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported version") - else - puts "config::No configmap mounted for prometheus custom config, using defaults" - end -end -puts "****************End Prometheus Config Processing********************" diff --git a/build/version b/build/version index 2da3efa39..83a0a174b 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=13 +CONTAINER_BUILDVERSION_MAJOR=14 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20210223 +CONTAINER_BUILDVERSION_DATE=20210326 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/build/windows/installer/conf/fluent-bit.conf b/build/windows/installer/conf/fluent-bit.conf index 879ee4810..1eebe5fd6 100644 --- a/build/windows/installer/conf/fluent-bit.conf +++ b/build/windows/installer/conf/fluent-bit.conf @@ -12,6 +12,15 @@ Chunk_Size 32 Buffer_Size 64 +[INPUT] + Name tcp + Tag oms.container.perf.telegraf.* + Listen 0.0.0.0 + Port 25229 + Chunk_Size 32 + Buffer_Size 64 + Mem_Buf_Limit 5m + [OUTPUT] Name oms EnableTelemetry true diff --git a/build/windows/installer/conf/telegraf.conf b/build/windows/installer/conf/telegraf.conf new file mode 100644 index 000000000..5f4d2364e --- /dev/null +++ b/build/windows/installer/conf/telegraf.conf @@ -0,0 +1,162 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. 
+# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + + +# Global tags can be specified here in key="value" format. +[global_tags] + hostName = "placeholder_hostname" + + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "60s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "15s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. 
+ ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. + debug = false + ## Run telegraf in quiet mode (error log messages only). + quiet = true + ## Specify the log file name. The empty string means to log to stderr. + logfile = "" + ## Override default hostname, if empty use os.Hostname() + #hostname = "placeholder_hostname" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = true + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +# Generic socket writer capable of handling multiple socket types. +[[outputs.socket_writer]] + ## URL to connect to + address = "tcp://0.0.0.0:25229" + # address = "tcp://example.com:http" + # address = "tcp4://127.0.0.1:8094" + # address = "tcp6://127.0.0.1:8094" + # address = "tcp6://[2001:db8::1]:8094" + # address = "udp://127.0.0.1:8094" + # address = "udp4://127.0.0.1:8094" + # address = "udp6://127.0.0.1:8094" + # address = "unix:///tmp/telegraf.sock" + # address = "unixgram:///tmp/telegraf.sock" + + ## Optional TLS Config + # tls_ca = "/etc/telegraf/ca.pem" + # tls_cert = "/etc/telegraf/cert.pem" + # tls_key = "/etc/telegraf/key.pem" + ## Use TLS but skip chain & host verification + # insecure_skip_verify = false + + ## Period between keep alive probes. 
+ ## Only applies to TCP sockets. + ## 0 disables keep alive probes. + ## Defaults to the OS configuration. + # keep_alive_period = "5m" + + ## Data format to generate. + ## Each data format has its own unique set of configuration options, read + ## more about them here: + ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md + data_format = "json" + namedrop = ["agent_telemetry"] + #tagdrop = ["AgentVersion","AKS_RESOURCE_ID", "ACS_RESOURCE_NAME", "Region","ClusterName","ClusterType", "Computer", "ControllerType"] + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + +[[processors.converter]] + [processors.converter.fields] + float = ["*"] + +#Prometheus Custom Metrics +[[inputs.prometheus]] + interval = "$AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL" + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + $AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS + $AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR + $AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR + + fieldpass = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS + fielddrop = $AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP + + metric_version = 2 + url_tag = "scrapeUrl" + ## Kubernetes config file to create client from. + # kube_config = "/path/to/kubernetes.config" + + ## Use bearer token for authorization. 
('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + ## OR + # bearer_token_string = "abc_123" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + #tls_cert = /path/to/certfile + # tls_key = /path/to/keyfile + ## Use TLS but skip chain & host verification + insecure_skip_verify = true + +$AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index ce64fd1ce..9c8014ed0 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.8.1 +version: 2.8.2 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 82d210f3d..8868b86bb 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -81,6 +81,12 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + - name: PODNAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SIDECAR_SCRAPING_ENABLED + value: "false" volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 615cd0485..7201ee6ae 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -131,6 
+131,7 @@ spec: - "/opt/livenessprobe.sh" initialDelaySeconds: 60 periodSeconds: 60 + timeoutSeconds: 15 {{- with .Values.omsagent.daemonset.affinity }} affinity: {{- toYaml . | nindent 8 }} {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 012dd2720..fdc520cba 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -72,7 +72,9 @@ spec: value: {{ .Values.Azure.Extension.Name | quote }} {{- end }} - name: USER_ASSIGNED_IDENTITY_CLIENT_ID - value: "" + value: "" + - name: SIDECAR_SCRAPING_ENABLED + value: "false" - name: ISTEST value: {{ .Values.omsagent.ISTEST | quote }} securityContext: @@ -109,6 +111,9 @@ spec: - mountPath: /etc/config/settings/adx name: omsagent-adx-secret readOnly: true + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config + readOnly: true livenessProbe: exec: command: @@ -117,6 +122,7 @@ spec: - "/opt/livenessprobe.sh" initialDelaySeconds: 60 periodSeconds: 60 + timeoutSeconds: 15 {{- with .Values.omsagent.deployment.affinity }} affinity: {{- toYaml . 
| nindent 8 }} {{- end }} @@ -158,4 +164,8 @@ spec: secret: secretName: omsagent-adx-secret optional: true + - name: osm-settings-vol-config + configMap: + name: container-azm-ms-osmconfig + optional: true {{- end }} diff --git a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml index 5db5c2dab..c0a6e3722 100644 --- a/charts/azuremonitor-containers/templates/omsagent-rbac.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-rbac.yaml @@ -28,7 +28,7 @@ rules: resources: ["healthstates"] verbs: ["get", "create", "patch"] - apiGroups: ["clusterconfig.azure.com"] - resources: ["azureclusteridentityrequests"] + resources: ["azureclusteridentityrequests", "azureclusteridentityrequests/status"] resourceNames: ["container-insights-clusteridentityrequest"] verbs: ["get", "create", "patch"] - nonResourceURLs: ["/metrics"] diff --git a/charts/azuremonitor-containers/templates/omsagent-secret.yaml b/charts/azuremonitor-containers/templates/omsagent-secret.yaml index 1a7f087ed..8c245338c 100644 --- a/charts/azuremonitor-containers/templates/omsagent-secret.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-secret.yaml @@ -13,7 +13,19 @@ data: WSID: {{ required "A valid workspace id is required!" .Values.omsagent.secret.wsid | b64enc | quote }} KEY: {{ required "A valid workspace key is required!" 
.Values.omsagent.secret.key | b64enc | quote }} DOMAIN: {{ .Values.omsagent.domain | b64enc | quote }} - {{- if ne .Values.omsagent.proxy "" }} + {{- $httpsProxyDict := urlParse .Values.Azure.proxySettings.httpsProxy -}} + {{- $httpProxyDict := urlParse .Values.Azure.proxySettings.httpProxy -}} + {{- if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpsProxy)) ($httpsProxyDict.userinfo) }} + PROXY: {{ .Values.Azure.proxySettings.httpsProxy | b64enc | quote }} + {{- else if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpsProxy)) (empty $httpsProxyDict.userinfo) }} + # adding arbitrary creds since omsagent expects arbitrary creds in case of no auth + PROXY: {{ urlJoin (dict "scheme" $httpsProxyDict.scheme "userinfo" "admin:secret" "host" $httpsProxyDict.host) | b64enc | quote }} + {{- else if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpProxy)) ($httpProxyDict.userinfo) }} + PROXY: {{ .Values.Azure.proxySettings.httpProxy | b64enc | quote }} + {{- else if and (and (.Values.Azure.proxySettings.isProxyEnabled) (.Values.Azure.proxySettings.httpProxy)) (empty $httpProxyDict.userinfo) }} + # adding arbitrary creds since omsagent expects arbitrary creds in case of no auth + PROXY: {{ urlJoin (dict "scheme" $httpProxyDict.scheme "userinfo" "admin:secret" "host" $httpProxyDict.host) | b64enc | quote }} + {{- else if ne .Values.omsagent.proxy "" }} PROXY: {{ .Values.omsagent.proxy | b64enc | quote }} {{- end }} {{- end }} diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index afe789d56..fc2a3afaf 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,13 +12,19 @@ Azure: Extension: Name: "" ResourceId: "" + proxySettings: + isProxyEnabled: false + httpProxy: "" + httpsProxy: "" + noProxy: "" + proxyCert: "" omsagent: image: repo: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod02232021" - tagWindows: "win-ciprod02232021" + tag: "ciprod03262021" + tagWindows: "win-ciprod03262021" pullPolicy: IfNotPresent - dockerProviderVersion: "13.0.0-0" + dockerProviderVersion: "14.0.0-0" agentVersion: "1.10.0.1" # The priority used by the omsagent priority class for the daemonset pods diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index aec1bb456..e38d9b4ab 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ b/kubernetes/container-azm-ms-agentconfig.yaml @@ -76,6 +76,17 @@ data: ## ex: monitor_kubernetes_pods_namespaces = ["default1", "default2", "default3"] # monitor_kubernetes_pods_namespaces = ["default1"] + ## Label selector to target pods which have the specified label + ## This will take effect when monitor_kubernetes_pods is set to true + ## Reference the docs at https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + # kubernetes_label_selector = "env=dev,app=nginx" + + ## Field selector to target pods which have the specified field + ## This will take effect when monitor_kubernetes_pods is set to true + ## Reference the docs at https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/ + ## eg. To scrape pods on a specific node + # kubernetes_field_selector = "spec.nodeName=$HOSTNAME" + [prometheus_data_collection_settings.node] # Node level scrape endpoint(s). These metrics will be scraped from agent's DaemonSet running in every node in the cluster # Any errors related to prometheus scraping can be found in the KubeMonAgentEvents table in the Log Analytics workspace that the cluster is sending data to. 
diff --git a/kubernetes/container-azm-ms-osmconfig.yaml b/kubernetes/container-azm-ms-osmconfig.yaml new file mode 100644 index 000000000..05b7ac3ed --- /dev/null +++ b/kubernetes/container-azm-ms-osmconfig.yaml @@ -0,0 +1,17 @@ +kind: ConfigMap +apiVersion: v1 +data: + schema-version: + #string.used by agent to parse OSM config. supported versions are {v1}. Configs with other schema versions will be rejected by the agent. + v1 + config-version: + #string.used by OSM addon team to keep track of this config file's version in their source control/repository (max allowed 10 chars, other chars will be truncated) + ver1 + osm-metric-collection-configuration: |- + # OSM metric collection settings + [osm_metric_collection_configuration.settings] + # Namespaces to monitor + # monitor_namespaces = ["namespace1", "namespace2"] +metadata: + name: container-azm-ms-osmconfig + namespace: kube-system diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index bee718a31..76b8622b4 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod02232021 +ARG IMAGE_TAG=ciprod03262021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi @@ -17,7 +17,7 @@ ENV KUBE_CLIENT_BACKOFF_BASE 1 ENV KUBE_CLIENT_BACKOFF_DURATION 0 ENV RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR 0.9 RUN /usr/bin/apt-get update && /usr/bin/apt-get install -y libc-bin wget openssl curl sudo python-ctypes init-system-helpers net-tools rsyslog cron vim dmidecode apt-transport-https gnupg && rm -rf /var/lib/apt/lists/* -COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs mdsd.xml envmdsd $tmpdir/ +COPY setup.sh main.sh defaultpromenvvariables defaultpromenvvariables-rs defaultpromenvvariables-sidecar mdsd.xml envmdsd $tmpdir/ 
WORKDIR ${tmpdir} # copy docker provider shell bundle to use the agent image diff --git a/kubernetes/linux/defaultpromenvvariables-rs b/kubernetes/linux/defaultpromenvvariables-rs index 1346e62b9..920f4e90e 100644 --- a/kubernetes/linux/defaultpromenvvariables-rs +++ b/kubernetes/linux/defaultpromenvvariables-rs @@ -1,7 +1,12 @@ -export AZMON_RS_PROM_INTERVAL="1m" -export AZMON_RS_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" -export AZMON_RS_PROM_FIELDPASS="[]" -export AZMON_RS_PROM_FIELDDROP="[]" -export AZMON_RS_PROM_URLS="[]" -export AZMON_RS_PROM_K8S_SERVICES="[]" -export AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL="1m" +export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" +export AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE="pod_scrape_scope = 'cluster'" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_URLS="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_K8S_SERVICES="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export AZMON_TELEGRAF_OSM_PROM_PLUGINS="" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = ''" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = ''" + diff --git a/kubernetes/linux/defaultpromenvvariables-sidecar b/kubernetes/linux/defaultpromenvvariables-sidecar new file mode 100644 index 000000000..3301488d8 --- /dev/null +++ b/kubernetes/linux/defaultpromenvvariables-sidecar @@ -0,0 +1,9 @@ +export AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL="1m" +export AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS="monitor_kubernetes_pods = false" +export AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE="pod_scrape_scope = 'node'" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP="[]" +export AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER="" +export 
AZMON_TELEGRAF_OSM_PROM_PLUGINS="" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR="kubernetes_label_selector = ''" +export AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR="kubernetes_field_selector = ''" diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index c4067f25e..71e46875b 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -2,7 +2,17 @@ if [ -e "/etc/config/kube.conf" ]; then cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf +elif [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + echo "setting omsagent conf file for prometheus sidecar" + cat /etc/opt/microsoft/docker-cimprov/prometheus-side-car.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf + # omsadmin.sh replaces %MONITOR_AGENT_PORT% and %SYSLOG_PORT% in the monitor.conf and syslog.conf with default ports 25324 and 25224. + # Since we are running 2 omsagents in the same pod, we need to use a different port for the sidecar, + # else we will see the Address already in use - bind(2) for 0.0.0.0:253(2)24 error. + # Look into omsadmin.sh scripts's configure_monitor_agent()/configure_syslog() and find_available_port() methods for more info. + sed -i -e 's/port %MONITOR_AGENT_PORT%/port 25326/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/monitor.conf + sed -i -e 's/port %SYSLOG_PORT%/port 25226/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf else + echo "setting omsagent conf file for daemonset" sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf fi sed -i -e 's/bind 127.0.0.1/bind 0.0.0.0/g' /etc/opt/microsoft/omsagent/sysconf/omsagent.d/syslog.conf @@ -28,6 +38,12 @@ sudo setfacl -m user:omsagent:rwx /var/opt/microsoft/docker-cimprov/log #Run inotify as a daemon to track changes to the mounted configmap. 
inotifywait /etc/config/settings --daemon --recursive --outfile "/opt/inotifyoutput.txt" --event create,delete --format '%e : %T' --timefmt '+%s' +#Run inotify as a daemon to track changes to the mounted configmap for OSM settings. +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then + inotifywait /etc/config/osm-settings --daemon --recursive --outfile "/opt/inotifyoutput-osm.txt" --event create,delete --format '%e : %T' --timefmt '+%s' +fi + #resourceid override for loganalytics data. if [ -z $AKS_RESOURCE_ID ]; then echo "not setting customResourceId" @@ -68,6 +84,24 @@ if [ -e "/etc/config/settings/config-version" ] && [ -s "/etc/config/settings/ echo "AZMON_AGENT_CFG_FILE_VERSION:$AZMON_AGENT_CFG_FILE_VERSION" fi +#set OSM config schema version +if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then + if [ -e "/etc/config/osm-settings/schema-version" ] && [ -s "/etc/config/osm-settings/schema-version" ]; then + #trim + osm_config_schema_version="$(cat /etc/config/osm-settings/schema-version | xargs)" + #remove all spaces + osm_config_schema_version="${osm_config_schema_version//[[:space:]]/}" + #take first 10 characters + osm_config_schema_version="$(echo $osm_config_schema_version| cut -c1-10)" + + export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version + echo "export AZMON_OSM_CFG_SCHEMA_VERSION=$osm_config_schema_version" >> ~/.bashrc + source ~/.bashrc + echo "AZMON_OSM_CFG_SCHEMA_VERSION:$AZMON_OSM_CFG_SCHEMA_VERSION" + fi +fi + export PROXY_ENDPOINT="" # Check for internet connectivity or workspace deletion @@ -193,71 +227,58 @@ echo "export TELEMETRY_APPLICATIONINSIGHTS_KEY=$aikey" >> ~/.bashrc source ~/.bashrc +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + #Parse the 
configmap to set the right environment variables. + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb -#Parse the configmap to set the right environment variables. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser.rb - -cat config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source config_env_var - + cat config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source config_env_var +fi #Parse the configmap to set the right environment variables for agent config. #Note > tomlparser-agent-config.rb has to be parsed first before td-agent-bit-conf-customizer.rb for fbit agent settings -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-agent-config.rb -cat agent_config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source agent_config_env_var + cat agent_config_env_var | while read line; do + #echo $line + echo $line >> ~/.bashrc + done + source agent_config_env_var -#Parse the configmap to set the right environment variables for network policy manager (npm) integration. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb + #Parse the configmap to set the right environment variables for network policy manager (npm) integration. + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-npm-config.rb -cat integration_npm_config_env_var | while read line; do - #echo $line - echo $line >> ~/.bashrc -done -source integration_npm_config_env_var + cat integration_npm_config_env_var | while read line; do + #echo $line + echo $line >> ~/.bashrc + done + source integration_npm_config_env_var +fi #Replace the placeholders in td-agent-bit.conf file for fluentbit with custom/default values in daemonset -if [ ! -e "/etc/config/kube.conf" ]; then +if [ ! 
-e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then /opt/microsoft/omsagent/ruby/bin/ruby td-agent-bit-conf-customizer.rb fi #Parse the prometheus configmap to create a file with new custom settings. /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-prom-customconfig.rb -#If config parsing was successful, a copy of the conf file with replaced custom settings file is created -if [ ! -e "/etc/config/kube.conf" ]; then - if [ -e "/opt/telegraf-test.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test.conf -test - if [ $? -eq 0 ]; then - mv "/opt/telegraf-test.conf" "/etc/opt/microsoft/docker-cimprov/telegraf.conf" - fi - echo "****************End Telegraf Run in Test Mode**************************" - fi -else - if [ -e "/opt/telegraf-test-rs.conf" ]; then - echo "****************Start Telegraf in Test Mode**************************" - /opt/telegraf --config /opt/telegraf-test-rs.conf -test - if [ $? -eq 0 ]; then - mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" - fi - echo "****************End Telegraf Run in Test Mode**************************" - fi -fi - #Setting default environment variables to be used in any case of failure in the above steps if [ ! 
-e "/etc/config/kube.conf" ]; then - cat defaultpromenvvariables | while read line; do - echo $line >> ~/.bashrc - done - source defaultpromenvvariables + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + cat defaultpromenvvariables-sidecar | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables-sidecar + else + cat defaultpromenvvariables | while read line; do + echo $line >> ~/.bashrc + done + source defaultpromenvvariables + fi else cat defaultpromenvvariables-rs | while read line; do echo $line >> ~/.bashrc @@ -273,21 +294,37 @@ if [ -e "telemetry_prom_config_env_var" ]; then source telemetry_prom_config_env_var fi + #Parse the configmap to set the right environment variables for MDM metrics configuration for Alerting. -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb +if [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-mdm-metrics-config.rb -cat config_mdm_metrics_env_var | while read line; do - echo $line >> ~/.bashrc -done -source config_mdm_metrics_env_var + cat config_mdm_metrics_env_var | while read line; do + echo $line >> ~/.bashrc + done + source config_mdm_metrics_env_var -#Parse the configmap to set the right environment variables for metric collection settings -/opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb + #Parse the configmap to set the right environment variables for metric collection settings + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-metric-collection-config.rb -cat config_metric_collection_env_var | while read line; do - echo $line >> ~/.bashrc -done -source config_metric_collection_env_var + cat config_metric_collection_env_var | while read line; do + echo $line >> ~/.bashrc + done + source config_metric_collection_env_var +fi + +# OSM scraping to be done in replicaset if sidecar car scraping is disabled and always do the scraping from the sidecar (It will always be either one of the two) 
+if [[ ( ( ! -e "/etc/config/kube.conf" ) && ( "${CONTAINER_TYPE}" == "PrometheusSidecar" ) ) || + ( ( -e "/etc/config/kube.conf" ) && ( "${SIDECAR_SCRAPING_ENABLED}" == "false" ) ) ]]; then + /opt/microsoft/omsagent/ruby/bin/ruby tomlparser-osm-config.rb + + if [ -e "integration_osm_config_env_var" ]; then + cat integration_osm_config_env_var | while read line; do + echo $line >> ~/.bashrc + done + source integration_osm_config_env_var + fi +fi #Setting environment variable for CAdvisor metrics to use port 10255/10250 based on curl request echo "Making wget request to cadvisor endpoint with port 10250" @@ -511,7 +548,7 @@ fi #start oneagent -if [ ! -e "/etc/config/kube.conf" ]; then +if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidecar" ]; then if [ ! -z $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE ]; then echo "container logs configmap route is $AZMON_CONTAINER_LOGS_ROUTE" echo "container logs effective route is $AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE" @@ -552,18 +589,56 @@ if [ ! -e "/etc/config/kube.conf" ]; then fi echo "************end oneagent log routing checks************" +#If config parsing was successful, a copy of the conf file with replaced custom settings file is created +if [ ! -e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ] && [ -e "/opt/telegraf-test-prom-side-car.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test-prom-side-car.conf -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test-prom-side-car.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + else + if [ -e "/opt/telegraf-test.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test.conf -test + if [ $? 
-eq 0 ]; then + mv "/opt/telegraf-test.conf" "/etc/opt/microsoft/docker-cimprov/telegraf.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + fi + fi +else + if [ -e "/opt/telegraf-test-rs.conf" ]; then + echo "****************Start Telegraf in Test Mode**************************" + /opt/telegraf --config /opt/telegraf-test-rs.conf -test + if [ $? -eq 0 ]; then + mv "/opt/telegraf-test-rs.conf" "/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" + fi + echo "****************End Telegraf Run in Test Mode**************************" + fi +fi + #telegraf & fluentbit requirements if [ ! -e "/etc/config/kube.conf" ]; then - if [ "$CONTAINER_RUNTIME" == "docker" ]; then - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + echo "starting fluent-bit and setting telegraf conf file for prometheus sidecar" + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-prom-side-car.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-prom-side-car.conf" else - echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" - sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf - /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & - telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + echo "starting fluent-bit and setting telegraf conf file for daemonset" + if [ "$CONTAINER_RUNTIME" == "docker" ]; then + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + 
telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + else + echo "since container run time is $CONTAINER_RUNTIME update the container log fluentbit Parser to cri from docker" + sed -i 's/Parser.docker*/Parser cri/' /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf + /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit.conf -e /opt/td-agent-bit/bin/out_oms.so & + telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf.conf" + fi fi else + echo "starting fluent-bit and setting telegraf conf file for replicaset" /opt/td-agent-bit/bin/td-agent-bit -c /etc/opt/microsoft/docker-cimprov/td-agent-bit-rs.conf -e /opt/td-agent-bit/bin/out_oms.so & telegrafConfFile="/etc/opt/microsoft/docker-cimprov/telegraf-rs.conf" fi diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index fe6c0565a..218e3c717 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -60,7 +60,13 @@ sudo apt-get install libcap2-bin -y #service telegraf stop -wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf +#wget https://github.com/microsoft/Docker-Provider/releases/download/5.0.0.0/telegraf + +#1.18 pre-release +wget https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_linux_amd64.tar.gz +tar -zxvf telegraf-1.18.0_linux_amd64.tar.gz + +mv /opt/telegraf-1.18.0/usr/bin/telegraf /opt/telegraf chmod 777 /opt/telegraf diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index ebf0257af..206d9a8f0 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "13.0.0-0" + dockerProviderVersion: "14.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021" + image: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" imagePullPolicy: IfNotPresent resources: limits: @@ -443,6 +443,61 @@ spec: - /opt/livenessprobe.sh initialDelaySeconds: 60 periodSeconds: 60 + timeoutSeconds: 15 +#Only in sidecar scraping mode + - name: omsagent-prometheus + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" + imagePullPolicy: IfNotPresent + resources: + limits: + cpu: 500m + memory: 400Mi + requests: + cpu: 75m + memory: 225Mi + env: + # azure devops pipeline uses AKS_RESOURCE_ID and AKS_REGION hence ensure to uncomment these + - name: AKS_RESOURCE_ID + value: "VALUE_AKS_RESOURCE_ID_VALUE" + - name: AKS_REGION + value: "VALUE_AKS_RESOURCE_REGION_VALUE" + #Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters + #- name: ACS_RESOURCE_NAME + # value: "my_acs_cluster_name" + - name: CONTAINER_TYPE + value: "PrometheusSidecar" + - name: CONTROLLER_TYPE + value: "DaemonSet" + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + # Update this with the user assigned msi client id for omsagent + - name: USER_ASSIGNED_IDENTITY_CLIENT_ID + value: "" + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/kubernetes/host + name: azure-json-path + - mountPath: /etc/omsagent-secret + name: omsagent-secret + readOnly: true + - mountPath: /etc/config/settings + name: settings-vol-config + readOnly: true + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config + readOnly: true + livenessProbe: + exec: + command: + - /bin/bash + - -c + - /opt/livenessprobe.sh + initialDelaySeconds: 60 + periodSeconds: 60 + timeoutSeconds: 15 affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -502,6 +557,10 @@ spec: secret: secretName: omsagent-adx-secret optional: true + - name: osm-settings-vol-config + configMap: + name: container-azm-ms-osmconfig + optional: true --- 
apiVersion: apps/v1 kind: Deployment @@ -524,13 +583,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "13.0.0-0" + dockerProviderVersion: "14.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" imagePullPolicy: IfNotPresent resources: limits: @@ -559,6 +618,9 @@ spec: # Update this with the user assigned msi client id for omsagent - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" + # Add the below environment variable to true only in sidecar enabled regions, else set it to false + - name: SIDECAR_SCRAPING_ENABLED + value: "true" securityContext: privileged: true ports: @@ -586,6 +648,8 @@ spec: readOnly: true - mountPath: /etc/config/settings/adx name: omsagent-adx-secret + - mountPath: /etc/config/osm-settings + name: osm-settings-vol-config readOnly: true livenessProbe: exec: @@ -595,6 +659,7 @@ spec: - /opt/livenessprobe.sh initialDelaySeconds: 60 periodSeconds: 60 + timeoutSeconds: 15 affinity: nodeAffinity: # affinity to schedule on to ephemeral os node if its available @@ -658,6 +723,10 @@ spec: secret: secretName: omsagent-adx-secret optional: true + - name: osm-settings-vol-config + configMap: + name: container-azm-ms-osmconfig + optional: true --- apiVersion: apps/v1 kind: DaemonSet @@ -681,7 +750,7 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "13.0.0-0" + dockerProviderVersion: "14.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -691,7 +760,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod02232021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021" imagePullPolicy: IfNotPresent resources: limits: @@ -711,10 +780,16 @@ spec: 
valueFrom: fieldRef: fieldPath: spec.nodeName + - name: PODNAME + valueFrom: + fieldRef: + fieldPath: metadata.name - name: NODE_IP valueFrom: fieldRef: fieldPath: status.hostIP + - name: SIDECAR_SCRAPING_ENABLED + value: "true" volumeMounts: - mountPath: C:\ProgramData\docker\containers name: docker-windows-containers diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index d4f118449..e4ace417a 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod02232021 +ARG IMAGE_TAG=win-ciprod03262021 # Do not split this into multiple RUN! # Docker creates a layer for every RUN-Statement @@ -47,6 +47,7 @@ RUN ./setup.ps1 COPY main.ps1 /opt/omsagentwindows/scripts/powershell COPY ./omsagentwindows/installer/scripts/filesystemwatcher.ps1 /opt/omsagentwindows/scripts/powershell COPY ./omsagentwindows/installer/scripts/livenessprobe.cmd /opt/omsagentwindows/scripts/cmd/ +COPY setdefaulttelegrafenvvariables.ps1 /opt/omsagentwindows/scripts/powershell # copy ruby scripts to /opt folder COPY ./omsagentwindows/installer/scripts/*.rb /opt/omsagentwindows/scripts/ruby/ @@ -62,6 +63,9 @@ COPY ./omsagentwindows/installer/conf/fluent-docker-parser.conf /etc/fluent/ COPY ./omsagentwindows/installer/conf/fluent-bit.conf /etc/fluent-bit COPY ./omsagentwindows/installer/conf/out_oms.conf /etc/omsagentwindows +# copy telegraf conf file +COPY ./omsagentwindows/installer/conf/telegraf.conf /etc/telegraf/ + # copy keepcert alive ruby scripts COPY ./omsagentwindows/installer/scripts/rubyKeepCertificateAlive/*.rb /etc/fluent/plugin/ diff --git a/kubernetes/windows/main.ps1 b/kubernetes/windows/main.ps1 index 722392157..95cba2579 100644 --- a/kubernetes/windows/main.ps1 +++ b/kubernetes/windows/main.ps1 @@ -273,9 +273,9 @@ function Get-ContainerRuntime { return $containerRuntime } -function 
Start-Fluent { +function Start-Fluent-Telegraf { - # Run fluent-bit service first so that we do not miss any logs being forwarded by the fluentd service. + # Run fluent-bit service first so that we do not miss any logs being forwarded by the fluentd service and telegraf service. # Run fluent-bit as a background job. Switch this to a windows service once fluent-bit supports natively running as a windows service Start-Job -ScriptBlock { Start-Process -NoNewWindow -FilePath "C:\opt\fluent-bit\bin\fluent-bit.exe" -ArgumentList @("-c", "C:\etc\fluent-bit\fluent-bit.conf", "-e", "C:\opt\omsagentwindows\out_oms.so") } @@ -289,35 +289,99 @@ function Start-Fluent { (Get-Content -Path C:/etc/fluent/fluent.conf -Raw) -replace 'fluent-docker-parser.conf','fluent-cri-parser.conf' | Set-Content C:/etc/fluent/fluent.conf } + # Start telegraf only in sidecar scraping mode + $sidecarScrapingEnabled = [System.Environment]::GetEnvironmentVariable('SIDECAR_SCRAPING_ENABLED') + if (![string]::IsNullOrEmpty($sidecarScrapingEnabled) -and $sidecarScrapingEnabled.ToLower() -eq 'true') + { + Write-Host "Starting telegraf..." + Start-Telegraf + } + fluentd --reg-winsvc i --reg-winsvc-auto-start --winsvc-name fluentdwinaks --reg-winsvc-fluentdopt '-c C:/etc/fluent/fluent.conf -o C:/etc/fluent/fluent.log' Notepad.exe | Out-Null } -function Generate-Certificates { - Write-Host "Generating Certificates" - C:\\opt\\omsagentwindows\\certgenerator\\certificategenerator.exe -} +function Start-Telegraf { + # Set default telegraf environment variables for prometheus scraping + Write-Host "**********Setting default environment variables for telegraf prometheus plugin..." 
+ .\setdefaulttelegrafenvvariables.ps1 + + # run prometheus custom config parser + Write-Host "**********Running config parser for custom prometheus scraping**********" + ruby /opt/omsagentwindows/scripts/ruby/tomlparser-prom-customconfig.rb + Write-Host "**********End running config parser for custom prometheus scraping**********" + + + # Set required environment variable for telegraf prometheus plugin to run properly + Write-Host "Setting required environment variables for telegraf prometheus input plugin to run properly..." + $kubernetesServiceHost = [System.Environment]::GetEnvironmentVariable("KUBERNETES_SERVICE_HOST", "process") + if (![string]::IsNullOrEmpty($kubernetesServiceHost)) { + [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_HOST", $kubernetesServiceHost, "machine") + Write-Host "Successfully set environment variable KUBERNETES_SERVICE_HOST - $($kubernetesServiceHost) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable KUBERNETES_SERVICE_HOST for target 'machine' since it is either null or empty" + } + + $kubernetesServicePort = [System.Environment]::GetEnvironmentVariable("KUBERNETES_SERVICE_PORT", "process") + if (![string]::IsNullOrEmpty($kubernetesServicePort)) { + [System.Environment]::SetEnvironmentVariable("KUBERNETES_SERVICE_PORT", $kubernetesServicePort, "machine") + Write-Host "Successfully set environment variable KUBERNETES_SERVICE_PORT - $($kubernetesServicePort) for target 'machine'..." + } + else { + Write-Host "Failed to set environment variable KUBERNETES_SERVICE_PORT for target 'machine' since it is either null or empty" + } + + $nodeIp = [System.Environment]::GetEnvironmentVariable("NODE_IP", "process") + if (![string]::IsNullOrEmpty($nodeIp)) { + [System.Environment]::SetEnvironmentVariable("NODE_IP", $nodeIp, "machine") + Write-Host "Successfully set environment variable NODE_IP - $($nodeIp) for target 'machine'..." 
+ } + else { + Write-Host "Failed to set environment variable NODE_IP for target 'machine' since it is either null or empty" + } -function Bootstrap-CACertificates { + Write-Host "Installing telegraf service" + C:\opt\telegraf\telegraf.exe --service install --config "C:\etc\telegraf\telegraf.conf" + + # Setting delay auto start for telegraf since there have been known issues with windows server and telegraf - + # https://github.com/influxdata/telegraf/issues/4081 + # https://github.com/influxdata/telegraf/issues/3601 try { - # This is required when the root CA certs are different for some clouds. - $caCerts=Invoke-WebRequest 'http://168.63.129.16/machine?comp=acmspackage&type=cacertificates&ext=json' -UseBasicParsing | ConvertFrom-Json - if (![string]::IsNullOrEmpty($caCerts)) { - $certificates = $caCerts.Certificates - for ($index = 0; $index -lt $certificates.Length ; $index++) { - $name=$certificates[$index].Name - $certificates[$index].CertBody > $name - Write-Host "name: $($name)" - Import-Certificate -FilePath .\$name -CertStoreLocation 'Cert:\LocalMachine\Root' -Verbose - } + $serverName = [System.Environment]::GetEnvironmentVariable("PODNAME", "process") + if (![string]::IsNullOrEmpty($serverName)) { + sc.exe \\$serverName config telegraf start= delayed-auto + Write-Host "Successfully set delayed start for telegraf" + + } else { + Write-Host "Failed to get environment variable PODNAME to set delayed telegraf start" } } catch { - $e = $_.Exception - Write-Host $e - Write-Host "exception occured in Bootstrap-CACertificates..." + $e = $_.Exception + Write-Host $e + Write-Host "exception occured in delayed telegraf start.. 
continuing without exiting" } + Write-Host "Running telegraf service in test mode" + C:\opt\telegraf\telegraf.exe --config "C:\etc\telegraf\telegraf.conf" --test + Write-Host "Starting telegraf service" + C:\opt\telegraf\telegraf.exe --service start + + # Trying to start telegraf again if it did not start due to fluent bit not being ready at startup + Get-Service telegraf | findstr Running + if ($? -eq $false) + { + Write-Host "trying to start telegraf in again in 30 seconds, since fluentbit might not have been ready..." + Start-Sleep -s 30 + C:\opt\telegraf\telegraf.exe --service start + Get-Service telegraf + } +} + +function Generate-Certificates { + Write-Host "Generating Certificates" + C:\\opt\\omsagentwindows\\certgenerator\\certificategenerator.exe } function Test-CertificatePath { @@ -346,16 +410,9 @@ Remove-WindowsServiceIfItExists "fluentdwinaks" Set-EnvironmentVariables Start-FileSystemWatcher -#Bootstrapping CA certs for non public clouds and AKS clusters -$aksResourceId = [System.Environment]::GetEnvironmentVariable("AKS_RESOURCE_ID") -if (![string]::IsNullOrEmpty($aksResourceId) -and $aksResourceId.ToLower().Contains("/microsoft.containerservice/managedclusters/")) -{ - Bootstrap-CACertificates -} - Generate-Certificates Test-CertificatePath -Start-Fluent +Start-Fluent-Telegraf # List all powershell processes running. 
This should have main.ps1 and filesystemwatcher.ps1 Get-WmiObject Win32_process | Where-Object { $_.Name -match 'powershell' } | Format-Table -Property Name, CommandLine, ProcessId diff --git a/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 new file mode 100644 index 000000000..269894139 --- /dev/null +++ b/kubernetes/windows/setdefaulttelegrafenvvariables.ps1 @@ -0,0 +1,17 @@ +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", "1m", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_INTERVAL", "1m", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_MONITOR_PODS", "monitor_kubernetes_pods = false", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", "pod_scrape_scope = 'node'", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_SCRAPE_SCOPE", "pod_scrape_scope = 'node'", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", "[]", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDPASS", "[]", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", "[]", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_FIELDDROP", "[]", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", " ", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_PLUGINS_WITH_NAMESPACE_FILTER", " ", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "kubernetes_label_selector = ''", "process") 
+[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_LABEL_SELECTOR", "kubernetes_label_selector = ''", "machine") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "kubernetes_field_selector = ''", "process") +[System.Environment]::SetEnvironmentVariable("AZMON_TELEGRAF_CUSTOM_PROM_KUBERNETES_FIELD_SELECTOR", "kubernetes_field_selector = ''", "machine") + diff --git a/kubernetes/windows/setup.ps1 b/kubernetes/windows/setup.ps1 index dd6d52a11..25aad5e16 100644 --- a/kubernetes/windows/setup.ps1 +++ b/kubernetes/windows/setup.ps1 @@ -8,10 +8,12 @@ Write-Host ('Creating folder structure') New-Item -Type Directory -Path /opt/fluent-bit New-Item -Type Directory -Path /opt/scripts/ruby + New-Item -Type Directory -Path /opt/telegraf New-Item -Type Directory -Path /etc/fluent-bit New-Item -Type Directory -Path /etc/fluent New-Item -Type Directory -Path /etc/omsagentwindows + New-Item -Type Directory -Path /etc/telegraf New-Item -Type Directory -Path /etc/config/settings/ New-Item -Type Directory -Path /etc/config/adx/ @@ -32,6 +34,20 @@ Write-Host ('Installing Fluent Bit'); } Write-Host ('Finished Installing Fluentbit') +Write-Host ('Installing Telegraf'); +try { + $telegrafUri='https://dl.influxdata.com/telegraf/releases/telegraf-1.18.0_windows_amd64.zip' + Invoke-WebRequest -Uri $telegrafUri -OutFile /installation/telegraf.zip + Expand-Archive -Path /installation/telegraf.zip -Destination /installation/telegraf + Move-Item -Path /installation/telegraf/*/* -Destination /opt/telegraf/ -ErrorAction SilentlyContinue +} +catch { + $ex = $_.Exception + Write-Host "exception while downloading telegraf for windows" + Write-Host $ex + exit 1 +} +Write-Host ('Finished downloading Telegraf') Write-Host ('Installing Visual C++ Redistributable Package') $vcRedistLocation = 'https://aka.ms/vs/16/release/vc_redist.x64.exe' diff --git a/scripts/build/windows/install-build-pre-requisites.ps1 
b/scripts/build/windows/install-build-pre-requisites.ps1 index b5e6e2d18..3bb56ac2a 100755 --- a/scripts/build/windows/install-build-pre-requisites.ps1 +++ b/scripts/build/windows/install-build-pre-requisites.ps1 @@ -21,7 +21,7 @@ function Install-Go { # install go lang Write-Host("installing go ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ' + $output + '/quiet' + Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' Write-Host("installing go completed") Write-Host "updating PATH variable" @@ -102,7 +102,7 @@ function Install-DotNetCoreSDK() { # install dotNet core sdk Write-Host("installing .net core sdk 3.1 ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ' + $output + '/quiet' + Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' Write-Host("installing .net core sdk 3.1 completed") } @@ -129,7 +129,7 @@ function Install-Docker() { # install docker Write-Host("installing docker for desktop ...") - Start-Process msiexec.exe -Wait -ArgumentList '/I ' + $output + '/quiet' + Start-Process msiexec.exe -Wait -ArgumentList '/I ', $output, '/quiet' Write-Host("installing docker for desktop completed") } diff --git a/scripts/onboarding/managed/disable-monitoring.sh b/scripts/onboarding/managed/disable-monitoring.sh index d43a79f51..29b755331 100644 --- a/scripts/onboarding/managed/disable-monitoring.sh +++ b/scripts/onboarding/managed/disable-monitoring.sh @@ -127,7 +127,7 @@ remove_monitoring_tags() # validate cluster identity for Azure Arc enabled Kubernetes cluster if [ "$isArcK8sCluster" = true ] ; then - identitytype=$(az resource show -g ${clusterResourceGroup} -n ${clusterName} --resource-type $resourceProvider --query identity.type) + identitytype=$(az resource show -g ${clusterResourceGroup} -n ${clusterName} --resource-type $resourceProvider --query identity.type -o json) identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') echo "cluster identity type:" $identitytype if [[ 
"$identitytype" != "systemassigned" ]]; then diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index d8ab7b345..ec57bf981 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -64,7 +64,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.8.1" +$mcrChartVersion = "2.8.2" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." $omsAgentDomainName="opinsights.azure.com" diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 9d0c0aca5..9747d932d 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -44,7 +44,7 @@ defaultAzureCloud="AzureCloud" omsAgentDomainName="opinsights.azure.com" # released chart version in mcr -mcrChartVersion="2.8.1" +mcrChartVersion="2.8.2" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." 
@@ -311,7 +311,7 @@ parse_args() { validate_and_configure_supported_cloud() { echo "get active azure cloud name configured to azure cli" - azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]") + azureCloudName=$(az cloud show --query name -o tsv | tr "[:upper:]" "[:lower:]" | tr -d "[:space:]") echo "active azure cloud name configured to azure cli: ${azureCloudName}" if [ "$isArcK8sCluster" = true ]; then if [ "$azureCloudName" != "azurecloud" -a "$azureCloudName" != "azureusgovernment" ]; then @@ -339,8 +339,8 @@ validate_cluster_identity() { local rgName="$(echo ${1})" local clusterName="$(echo ${2})" - local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type) - identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') + local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type -o json) + identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"' | tr -d "[:space:]") echo "cluster identity type:" $identitytype if [[ "$identitytype" != "systemassigned" ]]; then @@ -454,7 +454,7 @@ create_default_log_analytics_workspace() { echo "using existing default workspace:"$workspaceName fi - workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id) + workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id -o json) workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') echo "workspace resource Id: ${workspaceResourceId}" } @@ -477,12 +477,12 @@ get_workspace_guid_and_key() { local wsName="$(echo ${resourceId} | cut -d'/' -f9)" # get the workspace guid - workspaceGuid=$(az resource show -g $rgName -n $wsName --resource-type $workspaceResourceProvider --query properties.customerId) + workspaceGuid=$(az resource show -g 
$rgName -n $wsName --resource-type $workspaceResourceProvider --query properties.customerId -o json) workspaceGuid=$(echo $workspaceGuid | tr -d '"') echo "workspaceGuid:"$workspaceGuid echo "getting workspace primaryshared key" - workspaceKey=$(az rest --method post --uri $workspaceResourceId/sharedKeys?api-version=2015-11-01-preview --query primarySharedKey) + workspaceKey=$(az rest --method post --uri $workspaceResourceId/sharedKeys?api-version=2015-11-01-preview --query primarySharedKey -o json) workspaceKey=$(echo $workspaceKey | tr -d '"') } @@ -621,7 +621,7 @@ else set_azure_subscription $workspaceSubscriptionId fi - workspaceRegion=$(az resource show --ids ${workspaceResourceId} --query location) + workspaceRegion=$(az resource show --ids ${workspaceResourceId} --query location -o json) workspaceRegion=$(echo $workspaceRegion | tr -d '"') echo "Workspace Region:"$workspaceRegion fi diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 6d14dfa5f..1cf7b5c97 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.8.1" +mcrChartVersion="2.8.2" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" @@ -202,7 +202,7 @@ validate_cluster_identity() { local rgName="$(echo ${1})" local clusterName="$(echo ${2})" - local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type) + local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type -o json) identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') echo "cluster identity type:" $identitytype @@ -216,7 +216,7 @@ validate_cluster_identity() { 
validate_monitoring_tags() { echo "get loganalyticsworkspaceResourceId tag on to cluster resource" - logAnalyticsWorkspaceResourceIdTag=$(az resource show --query tags.logAnalyticsWorkspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) + logAnalyticsWorkspaceResourceIdTag=$(az resource show --query tags.logAnalyticsWorkspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider -o json) echo "configured log analytics workspace: ${logAnalyticsWorkspaceResourceIdTag}" echo "successfully got logAnalyticsWorkspaceResourceId tag on the cluster resource" if [ -z "$logAnalyticsWorkspaceResourceIdTag" ]; then diff --git a/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json index 8ebef232a..0069307b7 100644 --- a/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json @@ -13,7 +13,7 @@ "metadata": { "description": "Location of the Azure Arc Connected Cluster Resource e.g. 
\"eastus\"" } - }, + }, "proxyEndpointUrl": { "type": "string", "defaultValue": "", @@ -114,8 +114,7 @@ }, "configurationProtectedSettings": { "omsagent.secret.wsid": "[reference(parameters('workspaceResourceId'), '2015-03-20').customerId]", - "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" , - "omsagent.proxy": "[if(equals(parameters('proxyEndpointUrl'), ''), '', parameters('proxyEndpointUrl'))]" + "omsagent.secret.key": "[listKeys(parameters('workspaceResourceId'), '2015-03-20').primarySharedKey]" }, "autoUpgradeMinorVersion": true, "releaseTrain": "Stable", diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index bda757eb8..0be806838 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -1494,4 +1494,4 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Running in replicaset. Disabling container enrichment caching & updates \n") } -} +} \ No newline at end of file diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 3d30ac5aa..48f82a9ab 100644 --- a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -10,9 +10,9 @@ import ( "strings" "time" + "github.com/fluent/fluent-bit-go/output" "github.com/microsoft/ApplicationInsights-Go/appinsights" "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" - "github.com/fluent/fluent-bit-go/output" ) var ( @@ -44,33 +44,45 @@ var ( ContainerLogsMDSDClientCreateErrors float64 //Tracks the number of write/send errors to ADX for containerlogs (uses ContainerLogTelemetryTicker) ContainerLogsSendErrorsToADXFromFluent float64 - //Tracks the number of ADX client create errors for containerlogs (uses ContainerLogTelemetryTicker) + //Tracks the number of ADX client create errors for containerlogs (uses ContainerLogTelemetryTicker) ContainerLogsADXClientCreateErrors float64 + //Tracks the number of OSM namespaces and sent only from 
prometheus sidecar (uses ContainerLogTelemetryTicker) + OSMNamespaceCount int + //Tracks whether monitor kubernetes pods is set to true and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPods string + //Tracks the number of monitor kubernetes pods namespaces and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPodsNamespaceLength int + //Tracks the number of monitor kubernetes pods label selectors and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPodsLabelSelectorLength int + //Tracks the number of monitor kubernetes pods field selectors and sent only from prometheus sidecar (uses ContainerLogTelemetryTicker) + PromMonitorPodsFieldSelectorLength int ) const ( - clusterTypeACS = "ACS" - clusterTypeAKS = "AKS" - envAKSResourceID = "AKS_RESOURCE_ID" - envACSResourceName = "ACS_RESOURCE_NAME" - envAppInsightsAuth = "APPLICATIONINSIGHTS_AUTH" - envAppInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT" - metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" - metricNameAvgLogGenerationRate = "ContainerLogsGeneratedPerSec" - metricNameLogSize = "ContainerLogsSize" - metricNameAgentLogProcessingMaxLatencyMs = "ContainerLogsAgentSideLatencyMs" - metricNameNumberofTelegrafMetricsSentSuccessfully = "TelegrafMetricsSentCount" - metricNameNumberofSendErrorsTelegrafMetrics = "TelegrafMetricsSendErrorCount" - metricNameNumberofSend429ErrorsTelegrafMetrics = "TelegrafMetricsSend429ErrorCount" - metricNameErrorCountContainerLogsSendErrorsToMDSDFromFluent = "ContainerLogs2MdsdSendErrorCount" - metricNameErrorCountContainerLogsMDSDClientCreateError = "ContainerLogsMdsdClientCreateErrorCount" - metricNameErrorCountContainerLogsSendErrorsToADXFromFluent = "ContainerLogs2ADXSendErrorCount" - metricNameErrorCountContainerLogsADXClientCreateError = "ContainerLogsADXClientCreateErrorCount" + clusterTypeACS = "ACS" + clusterTypeAKS = "AKS" + envAKSResourceID = "AKS_RESOURCE_ID" + 
envACSResourceName = "ACS_RESOURCE_NAME" + envAppInsightsAuth = "APPLICATIONINSIGHTS_AUTH" + envAppInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT" + metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" + metricNameAvgLogGenerationRate = "ContainerLogsGeneratedPerSec" + metricNameLogSize = "ContainerLogsSize" + metricNameAgentLogProcessingMaxLatencyMs = "ContainerLogsAgentSideLatencyMs" + metricNameNumberofTelegrafMetricsSentSuccessfully = "TelegrafMetricsSentCount" + metricNameNumberofSendErrorsTelegrafMetrics = "TelegrafMetricsSendErrorCount" + metricNameNumberofSend429ErrorsTelegrafMetrics = "TelegrafMetricsSend429ErrorCount" + metricNameErrorCountContainerLogsSendErrorsToMDSDFromFluent = "ContainerLogs2MdsdSendErrorCount" + metricNameErrorCountContainerLogsMDSDClientCreateError = "ContainerLogsMdsdClientCreateErrorCount" + metricNameErrorCountContainerLogsSendErrorsToADXFromFluent = "ContainerLogs2ADXSendErrorCount" + metricNameErrorCountContainerLogsADXClientCreateError = "ContainerLogsADXClientCreateErrorCount" defaultTelemetryPushIntervalSeconds = 300 - eventNameContainerLogInit = "ContainerLogPluginInitialized" - eventNameDaemonSetHeartbeat = "ContainerLogDaemonSetHeartbeatEvent" + eventNameContainerLogInit = "ContainerLogPluginInitialized" + eventNameDaemonSetHeartbeat = "ContainerLogDaemonSetHeartbeatEvent" + eventNameCustomPrometheusSidecarHeartbeat = "CustomPrometheusSidecarHeartbeatEvent" + eventNameWindowsFluentBitHeartbeat = "WindowsFluentBitHeartbeatEvent" ) // SendContainerLogPluginMetrics is a go-routine that flushes the data periodically (every 5 mins to App Insights) @@ -100,6 +112,11 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { containerLogsMDSDClientCreateErrors := ContainerLogsMDSDClientCreateErrors containerLogsSendErrorsToADXFromFluent := ContainerLogsSendErrorsToADXFromFluent containerLogsADXClientCreateErrors := ContainerLogsADXClientCreateErrors + osmNamespaceCount := OSMNamespaceCount + 
promMonitorPods := PromMonitorPods + promMonitorPodsNamespaceLength := PromMonitorPodsNamespaceLength + promMonitorPodsLabelSelectorLength := PromMonitorPodsLabelSelectorLength + promMonitorPodsFieldSelectorLength := PromMonitorPodsFieldSelectorLength TelegrafMetricsSentCount = 0.0 TelegrafMetricsSendErrorCount = 0.0 @@ -118,17 +135,39 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { ContainerLogTelemetryMutex.Unlock() if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { - SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) - flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) - TelemetryClient.Track(flushRateMetric) - logRateMetric := appinsights.NewMetricTelemetry(metricNameAvgLogGenerationRate, logRate) - logSizeMetric := appinsights.NewMetricTelemetry(metricNameLogSize, logSizeRate) - TelemetryClient.Track(logRateMetric) - Log("Log Size Rate: %f\n", logSizeRate) - TelemetryClient.Track(logSizeMetric) - logLatencyMetric := appinsights.NewMetricTelemetry(metricNameAgentLogProcessingMaxLatencyMs, logLatencyMs) - logLatencyMetric.Properties["Container"] = logLatencyMsContainer - TelemetryClient.Track(logLatencyMetric) + if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheussidecar") == 0 { + telemetryDimensions := make(map[string]string) + telemetryDimensions["CustomPromMonitorPods"] = promMonitorPods + if promMonitorPodsNamespaceLength > 0 { + telemetryDimensions["CustomPromMonitorPodsNamespaceLength"] = strconv.Itoa(promMonitorPodsNamespaceLength) + } + if promMonitorPodsLabelSelectorLength > 0 { + telemetryDimensions["CustomPromMonitorPodsLabelSelectorLength"] = strconv.Itoa(promMonitorPodsLabelSelectorLength) + } + if promMonitorPodsFieldSelectorLength > 0 { + telemetryDimensions["CustomPromMonitorPodsFieldSelectorLength"] = strconv.Itoa(promMonitorPodsFieldSelectorLength) + } + if osmNamespaceCount > 0 { + 
telemetryDimensions["OsmNamespaceCount"] = strconv.Itoa(osmNamespaceCount) + } + + SendEvent(eventNameCustomPrometheusSidecarHeartbeat, telemetryDimensions) + + } else if strings.Compare(strings.ToLower(os.Getenv("OS_TYPE")), "windows") == 0 { + SendEvent(eventNameWindowsFluentBitHeartbeat, make(map[string]string)) + } else { + SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) + flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) + TelemetryClient.Track(flushRateMetric) + logRateMetric := appinsights.NewMetricTelemetry(metricNameAvgLogGenerationRate, logRate) + logSizeMetric := appinsights.NewMetricTelemetry(metricNameLogSize, logSizeRate) + TelemetryClient.Track(logRateMetric) + Log("Log Size Rate: %f\n", logSizeRate) + TelemetryClient.Track(logSizeMetric) + logLatencyMetric := appinsights.NewMetricTelemetry(metricNameAgentLogProcessingMaxLatencyMs, logLatencyMs) + logLatencyMetric.Properties["Container"] = logLatencyMsContainer + TelemetryClient.Track(logLatencyMetric) + } } TelemetryClient.Track(appinsights.NewMetricTelemetry(metricNameNumberofTelegrafMetricsSentSuccessfully, telegrafMetricsSentCount)) if telegrafMetricsSendErrorCount > 0.0 { @@ -255,12 +294,60 @@ func InitializeTelemetryClient(agentVersion string) (int, error) { } if isProxyConfigured == true { - CommonProperties["IsProxyConfigured"] = "true" + CommonProperties["IsProxyConfigured"] = "true" } else { - CommonProperties["IsProxyConfigured"] = "false" - } + CommonProperties["IsProxyConfigured"] = "false" + } + + // Adding container type to telemetry + if strings.Compare(strings.ToLower(os.Getenv("CONTROLLER_TYPE")), "daemonset") == 0 { + if strings.Compare(strings.ToLower(os.Getenv("CONTAINER_TYPE")), "prometheussidecar") == 0 { + CommonProperties["ContainerType"] = "prometheussidecar" + } + } TelemetryClient.Context().CommonProperties = CommonProperties + + // Getting the namespace count, monitor kubernetes pods values and namespace count once at 
start because it wont change unless the configmap is applied and the container is restarted + + OSMNamespaceCount = 0 + osmNsCount := os.Getenv("TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT") + if osmNsCount != "" { + OSMNamespaceCount, err = strconv.Atoi(osmNsCount) + if err != nil { + Log("OSM namespace count string to int conversion error %s", err.Error()) + } + } + + PromMonitorPods = os.Getenv("TELEMETRY_CUSTOM_PROM_MONITOR_PODS") + + PromMonitorPodsNamespaceLength = 0 + promMonPodsNamespaceLength := os.Getenv("TELEMETRY_CUSTOM_PROM_MONITOR_PODS_NS_LENGTH") + if promMonPodsNamespaceLength != "" { + PromMonitorPodsNamespaceLength, err = strconv.Atoi(promMonPodsNamespaceLength) + if err != nil { + Log("Custom prometheus monitor kubernetes pods namespace count string to int conversion error %s", err.Error()) + } + } + + PromMonitorPodsLabelSelectorLength = 0 + promLabelSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_LABEL_SELECTOR_LENGTH") + if promLabelSelectorLength != "" { + PromMonitorPodsLabelSelectorLength, err = strconv.Atoi(promLabelSelectorLength) + if err != nil { + Log("Custom prometheus label selector count string to int conversion error %s", err.Error()) + } + } + + PromMonitorPodsFieldSelectorLength = 0 + promFieldSelectorLength := os.Getenv("TELEMETRY_CUSTOM_PROM_FIELD_SELECTOR_LENGTH") + if promFieldSelectorLength != "" { + PromMonitorPodsFieldSelectorLength, err = strconv.Atoi(promFieldSelectorLength) + if err != nil { + Log("Custom prometheus field selector count string to int conversion error %s", err.Error()) + } + } + return 0, nil } diff --git a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb index d3a96d37d..8cb6f603e 100644 --- a/source/plugins/ruby/CAdvisorMetricsAPIClient.rb +++ b/source/plugins/ruby/CAdvisorMetricsAPIClient.rb @@ -370,7 +370,7 @@ def getPersistentVolumeMetrics(metricJSON, hostName, metricNameToCollect, metric 
metricTags[Constants::INSIGHTSMETRICS_TAGS_PV_CAPACITY_BYTES] = volume["capacityBytes"] metricItem["Tags"] = metricTags - + metricItems.push(metricItem) end end diff --git a/source/plugins/ruby/arc_k8s_cluster_identity.rb b/source/plugins/ruby/arc_k8s_cluster_identity.rb index 7824f3d4e..552dafb1f 100644 --- a/source/plugins/ruby/arc_k8s_cluster_identity.rb +++ b/source/plugins/ruby/arc_k8s_cluster_identity.rb @@ -26,6 +26,7 @@ def initialize @log.info "initialize start @ #{Time.now.utc.iso8601}" @token_expiry_time = Time.now @cached_access_token = String.new + @isLastTokenRenewalUpdatePending = false @token_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/token" @cert_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" @kube_api_server_url = KubernetesApiClient.getKubeAPIServerUrl @@ -41,14 +42,20 @@ def initialize def get_cluster_identity_token() begin - # get the cluster msi identity token either if its empty or near expirty. Token is valid 24 hrs. + # get the cluster msi identity token either if its empty or near expiry. Token is valid 24 hrs. if @cached_access_token.to_s.empty? || (Time.now + 60 * 60 > @token_expiry_time) # Refresh token 1 hr from expiration # renew the token if its near expiry if !@cached_access_token.to_s.empty? 
&& (Time.now + 60 * 60 > @token_expiry_time) - @log.info "renewing the token since its near expiry @ #{Time.now.utc.iso8601}" - renew_near_expiry_token - # sleep 60 seconds to get the renewed token available - sleep 60 + if !@isLastTokenRenewalUpdatePending + @log.info "token expiry - @ #{@token_expiry_time}" + @log.info "renewing the token since token has near expiry @ #{Time.now.utc.iso8601}" + renew_near_expiry_token + # sleep 60 seconds to get the renewed token available + sleep 60 + @isLastTokenRenewalUpdatePending = true + else + @log.warn "last token renewal update still pending @ #{Time.now.utc.iso8601}" + end end @log.info "get token reference from crd @ #{Time.now.utc.iso8601}" tokenReference = get_token_reference_from_crd @@ -61,6 +68,7 @@ def get_cluster_identity_token() token = get_token_from_secret(token_secret_name, token_secret_data_name) if !token.nil? @cached_access_token = token + @isLastTokenRenewalUpdatePending = false else @log.warn "got token nil from secret: #{@token_secret_name}" end @@ -123,7 +131,17 @@ def get_token_reference_from_crd() tokenReference["expirationTime"] = status["expirationTime"] tokenReference["secretName"] = status["tokenReference"]["secretName"] tokenReference["dataName"] = status["tokenReference"]["dataName"] - end + elsif get_response.code.to_i == 404 # this might happen if the crd resource deleted by user accidently + @log.info "since crd resource doesnt exist hence creating crd resource : #{@@cluster_identity_resource_name} @ #{Time.now.utc.iso8601}" + crd_request_body = get_crd_request_body + crd_request_body_json = crd_request_body.to_json + create_request = Net::HTTP::Post.new(crd_request_uri) + create_request["Content-Type"] = "application/json" + create_request["Authorization"] = "Bearer #{@service_account_token}" + create_request.body = crd_request_body_json + create_response = @http_client.request(create_request) + @log.info "Got response of #{create_response.code} for POST #{crd_request_uri} @ 
#{Time.now.utc.iso8601}" + end rescue => err @log.warn "get_token_reference_from_crd call failed: #{err}" ApplicationInsightsUtility.sendExceptionTelemetry(err, { "FeatureArea" => "MDM" }) @@ -141,20 +159,23 @@ def renew_near_expiry_token() cluster_identity_resource_namespace: @@cluster_identity_resource_namespace, cluster_identity_resource_name: @@cluster_identity_resource_name, } - crd_request_body = get_crd_request_body - crd_request_body_json = crd_request_body.to_json - update_request = Net::HTTP::Patch.new(crd_request_uri) + update_crd_request_body = { 'status': {'expirationTime': ''} } + update_crd_request_body_json = update_crd_request_body.to_json + update_crd_request_uri = crd_request_uri + "/status" + update_request = Net::HTTP::Patch.new(update_crd_request_uri) update_request["Content-Type"] = "application/merge-patch+json" update_request["Authorization"] = "Bearer #{@service_account_token}" - update_request.body = crd_request_body_json + update_request.body = update_crd_request_body_json update_response = @http_client.request(update_request) - @log.info "Got response of #{update_response.code} for PATCH #{crd_request_uri} @ #{Time.now.utc.iso8601}" + @log.info "Got response of #{update_response.code} for PATCH #{update_crd_request_uri} @ #{Time.now.utc.iso8601}" if update_response.code.to_i == 404 @log.info "since crd resource doesnt exist hence creating crd resource : #{@@cluster_identity_resource_name} @ #{Time.now.utc.iso8601}" create_request = Net::HTTP::Post.new(crd_request_uri) create_request["Content-Type"] = "application/json" create_request["Authorization"] = "Bearer #{@service_account_token}" - create_request.body = crd_request_body_json + create_crd_request_body = get_crd_request_body + create_crd_request_body_json = create_crd_request_body.to_json + create_request.body = create_crd_request_body_json create_response = @http_client.request(create_request) @log.info "Got response of #{create_response.code} for POST #{crd_request_uri} @ 
#{Time.now.utc.iso8601}" end diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index c803c0fa2..c057f7c2c 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -19,7 +19,10 @@ class Kube_nodeInventory_Input < Input @@rsPromUrlCount = ENV["TELEMETRY_RS_PROM_URLS_LENGTH"] @@rsPromMonitorPods = ENV["TELEMETRY_RS_PROM_MONITOR_PODS"] @@rsPromMonitorPodsNamespaceLength = ENV["TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH"] + @@rsPromMonitorPodsLabelSelectorLength = ENV["TELEMETRY_RS_PROM_LABEL_SELECTOR_LENGTH"] + @@rsPromMonitorPodsFieldSelectorLength = ENV["TELEMETRY_RS_PROM_FIELD_SELECTOR_LENGTH"] @@collectAllKubeEvents = ENV["AZMON_CLUSTER_COLLECT_ALL_KUBE_EVENTS"] + @@osmNamespaceCount = ENV["TELEMETRY_OSM_CONFIGURATION_NAMESPACES_COUNT"] def initialize super @@ -296,6 +299,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) properties["rsPromUrl"] = @@rsPromUrlCount properties["rsPromMonPods"] = @@rsPromMonitorPods properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength + properties["rsPromMonPodsLabelSelectorLength"] = @@rsPromMonitorPodsLabelSelectorLength + properties["rsPromMonPodsFieldSelectorLength"] = @@rsPromMonitorPodsFieldSelectorLength + properties["osmNamespaceCount"] = @@osmNamespaceCount end ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties) telemetrySent = true From f63d2ac69d120f8840b9b473e087e54a37adf171 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Fri, 26 Mar 2021 17:20:39 -0700 Subject: [PATCH 11/18] Removing readme duplicates (#521) --- ReleaseNotes.md | 91 ------------------------------------------------- 1 file changed, 91 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 01fb861cf..04bd7c6e5 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -10,97 +10,6 @@ additional questions or comments. 
## Release History Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) -### 02/23/2021 - -##### Version microsoft/oms:ciprod02232021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod02232021 (linux) -##### Version microsoft/oms:win-ciprod02232021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod02232021 (windows) -##### Code change log -- ContainerLogV2 schema support for LogAnalytics & ADX (not usable externally yet) -- Fix nodemetrics (cpuusageprecentage & memoryusagepercentage) metrics not flowing. This is fixed upstream for k8s versions >= 1.19.7 and >=1.20.2. -- Fix cpu & memory usage exceeded threshold container metrics not flowing when requests and/or limits were not set -- Mute some unused exceptions from going to telemetry -- Collect containerimage (repository, image & imagetag) from spec (instead of runtime) -- Add support for extension MSI for k8s arc -- Use cloud specific instrumentation keys for telemetry -- Picked up newer version for apt -- Add priority class to daemonset (in our chart only) - -### 01/11/2021 - -##### Version microsoft/oms:ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod01112021 (linux) -##### Version microsoft/oms:win-ciprod01112021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod01112021 (windows) -##### Code change log -- Fixes for Linux Agent Replicaset Pod OOMing issue -- Update fluentbit (1.14.2 to 1.6.8) for the Linux Daemonset -- Make Fluentbit settings: log_flush_interval_secs, tail_buf_chunksize_megabytes and tail_buf_maxsize_megabytes configurable via configmap -- Support for PV inventory collection -- Removal of Custom metric region check for Public cloud regions and update to use cloud environment variable to determine the custom metric support -- For daemonset pods, add the dnsconfig to use ndots: 3 from ndots:5 to optimize the number of DNS API calls 
made -- Fix for inconsistency in the collection container environment variables for the pods which has high number of containers -- Fix for disabling of std{out;err} log_collection_settings via configmap issue in windows daemonset -- Update to use workspace key from mount file rather than environment variable for windows daemonset agent -- Remove per container info logs in the container inventory -- Enable ADX route for windows container logs -- Remove logging to termination log in windows agent liveness probe - -### 11/09/2020 - -##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod11092020 (linux) -##### Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod11092020 (windows) -##### Code change log -- Fix for duplicate windows metrics - -### 10/27/2020 - -##### Version microsoft/oms:ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10272020 (linux) -##### Version microsoft/oms:win-ciprod10272020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10272020 (windows) -##### Code change log -- Activate oneagent in few AKS regions (koreacentral,norwayeast) -- Disable syslog -- Fix timeout for Windows daemonset liveness probe -- Make request == limit for Windows daemonset resources (cpu & memory) -- Schema v2 for container log (ADX only - applicable only for select customers for piloting) - -### 10/05/2020 - -##### Version microsoft/oms:ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod10052020 (linux) -##### Version microsoft/oms:win-ciprod10052020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod10052020 (windows) -##### Code change log -- Health CRD to version v1 (from v1beta1) for k8s versions >= 1.19.0 -- Collection of PV usage metrics for PVs mounted by pods (kube-system pods excluded by default)(doc-link-needed) -- Zero fill few custom metrics under a timer, also add zero filling for new PV usage metrics 
-- Collection of additional Kubelet metrics ('kubelet_running_pod_count','volume_manager_total_volumes','kubelet_node_config_error','process_resident_memory_bytes','process_cpu_seconds_total','kubelet_runtime_operations_total','kubelet_runtime_operations_errors_total'). This also includes updates to 'kubelet' workbook to include these new metrics -- Collection of Azure NPM (Network Policy Manager) metrics (basic & advanced. By default, NPM metrics collection is turned OFF)(doc-link-needed) -- Support log collection when docker root is changed with knode. Tracked by [this](https://github.com/Azure/AKS/issues/1373) issue -- Support for Pods in 'Terminating' state for nodelost scenarios -- Fix for reduction in telemetry for custom metrics ingestion failures -- Fix CPU capacity/limits metrics being 0 for Virtual nodes (VK) -- Add new custom metric regions (eastus2,westus,australiasoutheast,brazilsouth,germanywestcentral,northcentralus,switzerlandnorth) -- Enable strict SSL validation for AppInsights Ruby SDK -- Turn off custom metrics upload for unsupported cluster types -- Install CA certs from wire server for windows (in certain clouds) - -### 09/16/2020 - -> Note: This agent release targetted ONLY for non-AKS clusters via Azure Monitor for containers HELM chart update -##### Version microsoft/oms:ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod09162020 (linux) -##### Version microsoft/oms:win-ciprod09162020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod09162020 (windows) -##### Code change log -- Collection of Azure Network Policy Manager Basic and Advanced metrics -- Add support in Windows Agent for Container log collection of CRI runtimes such as ContainerD -- Alertable metrics support Arc K8s cluster to parity with AKS -- Support for multiple container log mount paths when docker is updated through knode -- Bug fix related to MDM telemetry - -### 08/07/2020 - -##### Version 
microsoft/oms:ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08072020 (linux) -##### Version microsoft/oms:win-ciprod08072020 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod08072020 (windows) -##### Code change log -- Collection of KubeState metrics for deployments and HPA -- Add the Proxy support for Windows agent -- Fix for ContainerState in ContainerInventory to handle Failed state and collection of environment variables for terminated and failed containers -- Change /spec to /metrics/cadvisor endpoint to collect node capacity metrics -- Disable Health Plugin by default and can enabled via configmap -- Pin version of jq to 1.5+dfsg-2 -- Bug fix for showing node as 'not ready' when there is disk pressure -- oneagent integration (disabled by default) -- Add region check before sending alertable metrics to MDM -- Telemetry fix for agent telemetry for sov. clouds - ### 03/26/2021 - ##### Version microsoft/oms:ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021 (linux) From a01175025cf04415424f8e182f164795b5d6ca71 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Thu, 22 Apr 2021 15:33:22 -0700 Subject: [PATCH 12/18] David/to merge into ciprod 04222021 (#545) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to 
install docker * fix: Pin to a particular version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics (#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod 
branch * fix links * fix image link * some more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments * Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. 
* add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback * Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. 
* Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link * chart update for sept2020 release (#457) * add missing version update in the script (#458) * November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart * remove hiphen for params in chart (#462) Merging as its a simple fix (remove hiphen) * Changes for cutting a new build for ciprod10272020 release (#460) * using latest stable version of msys2 (#465) * fixing the windows-perf-dups (#466) * chart updates related to new microsoft/charts repo (#467) * Changes for creating 11092020 release (#468) * MDM exception aggregation (#470) * grwehner/mdm custom metric regions (#471) Remove custom metrics region check for public cloud * updaitng rs limit to 1gb (#474) * grwehner/pv inventory (#455) Add fluentd plugin to request persistent volume info from the kubernetes api and send to LA * Gangams/fix for build release pipeline issue (#476) * use isolated cdpx acr * correct comment * add pv fluentd plugin config to helm rs config (#477) * add pv fluentd plugin to helm rs config * helm rbac permissions for pv api calls * Gangams/fix rs ooming (#473) * optimize kpi * optimize kube node inventory * add flags for events, deployments and hpa * have separate function parseNodeLimits * refactor code * fix crash * fix bug with service name * fix bugs related to get service name * update oom fix test agent * debug logs * fix service label issue * update to latest agent and enable ephemeral annotation * change stream size to 200 from 250 * update yaml * adjust chunksizes * add ruby gc env * yaml changes for cioomtest11282020-3 * telemetry to track pods latency * service count telemetry * rename variables * wip * nodes inventory telemetry * configmap changes * add emit streams in configmap * yaml updates * fix copy and paste bug * add 
todo comments * fix node latency telemetry bug * update yaml with latest test image * fix bug * upping rs memory change * fix mdm bug with final emit stream * update to latest image * fix pr feedback * fix pr feedback * rename health config to agent config * fix max allowed hpa chunk size * update to use 1k pod chunk since validated on 1.18+ * remove debug logs * minor updates * move defaults to common place * chart updates * final oomfix agent * update to use prod image so that can be validated with build pipeline * fix typo in comment * Gangams/enable arc onboarding to ff (#478) * wip * updates * trigger login if the ctx cloud not same as specified cloud * add missed commit * Convert PV type dictionary to json for telemetry so it shows up in logs (#480) * fix 2 windows tasks - 1) Dont log to termination log 2) enable ADX route for containerlogs in windows (for O365) (#482) * fix ci envvar collection in large pods (#483) * grwehner/jan agent tasks (#481) - Windows agent fix to use log filtering settings in config map. - Error handling for kubelet_utils get_node_capacity in case /metrics/cadvsior endpoint fails. 
- Remove env variable for workspace key for windows agent * updating fbit version and cpu limit (#485) * reverting to older version (#487) * Gangams/add fbsettings configurable via configmap (#486) * wip * fbit config settings * add config warn message * handle one config provided but not other * fixed pr feedback * fix copy paste error * rename config parameter names * fix typo * fix fbit crash in helm path * fix nil check * Gangams/jan agent release tasks (#484) * wip * explicit amd64 affinity for hybrid workloads * fix space issue * wip * revert vscode setting file * remove per container logs in ci (#488) * updates for ciprod01112021 release (#489) * new yaml files (#491) * Use cloud-specific instrumentation keys (#494) If APPLICATIONINSIGHTS_AUTH_URL is set/non-empty then the agent will now grab a custom IKey from a URL stored in APPLICATIONINSIGHTS_AUTH_URL * upgrade apt to latest version (#492) * upgrade apt to latest version * fix pr feedback * Gangams/add support for extension msi for arc k8s cluster (#495) * wip * add env var for the arc k8s extension name * chart update * extension msi updates * fix bug * revert chart and image to prod version * minor text changes * image tag to prod * wip * wip * wip * wip * final updates * fix whitespaces * simplify crd yaml * Gangams/arm template arc k8s extension (#496) * arm templates for arc k8s extension * update to use official extension type name * update * add identity property * add proxyendpointurl parameter * add default values * Gangams/aks monitoring via policy (#497) * enable monitoring through policy * wip * handle tags * wip * add alias * wip * working * updates * working * with deployment name * doc updates * doc updates * fix typo in the docs * revert to use operatingSystem from osImage for node os telemety (#498) * Container log v2 schema changes (#499) * make pod name in mdsd definition as str for consistency. msgp has no type checking, as it has type metadata in it the message itself. 
* Add priority class to the daemonsets (#500) * Add priority class to the daemonsets Add a priority class for omsagent and have the daemonsets use this to be sure to schedule the pods. Daemonset pods are constrained in scheduling to run on specific nodes. This is done by the daemonset controller. When a node shows up it will create a pod with a strong affinity to that node. When a node goes away, it will delete the pod with the node affinity to that node. Kubernetes pod scheduling does not know it is a daemonset but it does know it is tied to a specific node. With default scheduling, it is possible for the pods to be "frozen out" of a node because the node already is full. This can happen because "normal" pods may already exist and are looking for a node to get scheduled on when a node is added to the cluster. The daemonset controller will only first create the pod for the node at around the same time. The kubernetes scheduler is running async from all of this and thus there can be a race as to who gets scheduled on the node. The pod priority class (and thus the pod priority) is a way to indicate that the pod has a higher scheduling priority than a default pod. By default, all pods are at priority 0. Higher numbers are higher priority. Setting the priority to something greater than zero will allow the omsagent daemonsets to win a race against "normal" pods for scheduled resources on a node - and will also allow for graceful eviction in the case the node is too full. Without this, omsagent can be left out of node in clusters that are very busy, especially in dynamic scaling situations. I did not test the windows pod as we have no windows clusters. 
* CR feedback * fix node metric issue (#502) * Bug fixes for Feb release (#504) * bug fix for mdm metrics with no limits * fix exception bug * Gangams/feb 2021 agent bug fix (#505) * fix npe in getKubeServiceRecords * use image fields from spec * fix typo * cover all cases * handle scenario only digest specified * changes for release -ciprod02232021 (#506) * Gangams/e2e test framework (#503) * add agent e2e fw and tests * doc and script updates * add validation script * doc updates * yaml updates * fix typo * doc updates * more doc updates * add ISTEST for helm chart to use arc conf * refactor test code * fix pr feedback * fix pr feedback * fix pr feedback * fix pr feedback * scrape new kubelet pod count metric name (#508) * Adding explicit json output to az commands as the script fails if az is configured with Table output #409 (#513) * Gangams/arc proxy contract and token renewal updates (#511) * fix issue with crd status updates * handle renewal token delays * add proxy contract * updates for proxy cert for linux * remove proxycert related changes * fix whitespace issue * fix whitespace issue * remove proxy in arm template * doc updates for microsoft charts repo release (#512) * doc updates for microsoft charts repo release * wip * Update enable-monitoring.sh (#514) Line 314 and 343 seems to have trailing spaces for some subscriptions which is exiting the script even for valid scenarios Co-authored-by: Ganga Mahesh Siddem * Prometheus scraping from sidecar and OSM changes (#515) * add liveness timeout for exec (#518) * chart and other updates (#519) * Saaror osmdoc (#523) * Create ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Add files via upload * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * telemetry bug fix (#527) * Fix conflicting logrotate settings (#526) The node and the omsagent container both have a cron.daily file to rotate certain logs daily. 
These settings are the same for some files in /var/log (mounted from the node with read/write access), causing the rotation to fail when both try to rotate at the same time. So then the /var/log/*.1 file is written to forever. Since these files are always written to and never rotated, it causes high memory usage on the node after a while. This fix removes the container logrotate settings for /var/log, which the container does not write to. * bug fix (#528) * Gangams/arc ev2 deployment (#522) * ev2 deployment for arc k8s extension * fix charts path issue * rename scripts tar * add notifications * fix line endings * fix line endings * update with prod repo * fix file endings * added liveness and telemetry for telegraf (#517) * added liveness and telemetry for telegraf * code transfer * removed windows liveness probe * done * Windows metric fix (#530) * changes * about to remove container fix * moved caching code to existing loop * removed un-necessary changes * removed a few more un-necessary changes * added windows node check * fixed a bug * everything works confirmed * OSM doc update (#533) * Adding MDM metrics for threshold violation (#531) * Rashmi/april agent 2021 (#538) * add Read_from_Head config for all fluentbit tail plugins (#539) See the commit message of: fluent/fluent-bit@70e33fa for details explaining the fluentbit change and what Read_from_Head does when set to true. 
* fix programdata mount issue on containerd win nodes (#542) * Update sidecar mem limits (#541) * David/release 4 22 2021 (#544) * updating image tag and agent version * updated liveness probe * updated release notes again * fixed date in version file Co-authored-by: Ganga Mahesh Siddem Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner Co-authored-by: deagraw Co-authored-by: Michael Sinz <36865706+Michael-Sinz@users.noreply.github.com> Co-authored-by: Nicolas Yuen Co-authored-by: seenu433 --- .pipelines/build-linux.sh | 5 + .pipelines/pipeline.user.linux.yml | 7 +- ...rom-cdpx-and-push-to-ci-acr-linux-image.sh | 38 +++- ...m-cdpx-and-push-to-ci-acr-windows-image.sh | 39 +++- Documentation/OSMPrivatePreview/Image1.jpg | Bin 0 -> 120932 bytes Documentation/OSMPrivatePreview/ReadMe.md | 71 +++++++ ReleaseNotes.md | 17 ++ ReleaseProcess.md | 19 +- .../conf/td-agent-bit-prom-side-car.conf | 19 +- .../linux/installer/conf/td-agent-bit-rs.conf | 13 ++ build/linux/installer/conf/td-agent-bit.conf | 16 ++ .../linux/installer/scripts/livenessprobe.sh | 9 + .../scripts/tomlparser-mdm-metrics-config.rb | 21 ++ build/version | 4 +- charts/azuremonitor-containers/Chart.yaml | 2 +- .../templates/omsagent-daemonset-windows.yaml | 1 + charts/azuremonitor-containers/values.yaml | 21 +- ...ContainerInsightsExtension.Parameters.json | 66 +++++++ .../Public.Canary.RolloutSpec.json | 29 +++ .../RolloutSpecs/Public.FF.RolloutSpec.json | 29 +++ .../Public.HighLoad.RolloutSpec.json | 29 +++ .../Public.LightLoad.RolloutSpec.json | 29 +++ .../RolloutSpecs/Public.MC.RolloutSpec.json | 29 +++ .../Public.MediumLoad.RolloutSpec.json | 29 +++ .../Public.Pilot.RolloutSpec.json | 29 +++ .../ScopeBindings/Public.ScopeBindings.json | 125 ++++++++++++ .../Scripts/pushChartToAcr.sh | 181 ++++++++++++++++++ .../ServiceModels/Public.ServiceModel.json | 159 +++++++++++++++ 
.../ServiceGroupRoot/buildver.txt | 1 + kubernetes/container-azm-ms-agentconfig.yaml | 5 + kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/main.sh | 4 + kubernetes/linux/setup.sh | 4 + kubernetes/omsagent.yaml | 17 +- kubernetes/windows/Dockerfile | 2 +- .../onboarding/managed/enable-monitoring.ps1 | 6 +- .../onboarding/managed/enable-monitoring.sh | 2 +- .../onboarding/managed/upgrade-monitoring.sh | 2 +- .../existingClusterOnboarding.json | 7 - .../existingClusterParam.json | 3 - source/plugins/go/src/telemetry.go | 2 - source/plugins/ruby/KubernetesApiClient.rb | 34 +++- source/plugins/ruby/MdmAlertTemplates.rb | 69 ++++++- source/plugins/ruby/MdmMetricsGenerator.rb | 155 ++++++++++++--- source/plugins/ruby/constants.rb | 13 +- source/plugins/ruby/filter_cadvisor2mdm.rb | 42 +++- source/plugins/ruby/in_kube_nodes.rb | 96 ++++++++++ source/plugins/ruby/podinventory_to_mdm.rb | 3 +- 48 files changed, 1400 insertions(+), 105 deletions(-) mode change 100755 => 100644 .pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh mode change 100755 => 100644 .pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh create mode 100644 Documentation/OSMPrivatePreview/Image1.jpg create mode 100644 Documentation/OSMPrivatePreview/ReadMe.md create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.FF.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.HighLoad.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.LightLoad.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MC.RolloutSpec.json create mode 100644 
deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MediumLoad.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Pilot.RolloutSpec.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json create mode 100644 deployment/arc-k8s-extension/ServiceGroupRoot/buildver.txt diff --git a/.pipelines/build-linux.sh b/.pipelines/build-linux.sh index f4c92fda2..53f6a3a07 100644 --- a/.pipelines/build-linux.sh +++ b/.pipelines/build-linux.sh @@ -14,3 +14,8 @@ cd $DIR/../build/linux echo "----------- Build Docker Provider -------------------------------" make cd $DIR + +echo "------------ Bundle Shell Extension Scripts & HELM chart -------------------------" +cd $DIR/../deployment/arc-k8s-extension/ServiceGroupRoot/Scripts +tar -czvf ../artifacts.tar.gz ../../../../charts/azuremonitor-containers/ pushChartToAcr.sh + diff --git a/.pipelines/pipeline.user.linux.yml b/.pipelines/pipeline.user.linux.yml index 57273111e..565661d64 100644 --- a/.pipelines/pipeline.user.linux.yml +++ b/.pipelines/pipeline.user.linux.yml @@ -24,10 +24,15 @@ restore: build: commands: - - !!defaultcommand + - !!buildcommand name: 'Build Docker Provider Shell Bundle' command: '.pipelines/build-linux.sh' fail_on_stderr: false + artifacts: + - from: 'deployment' + to: 'build' + include: + - '**' package: commands: diff --git a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh old mode 100755 new mode 100644 index 3844ea185..e7d26245f --- a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh +++ b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-linux-image.sh @@ -35,12 +35,22 @@ echo "end: read appid and appsecret which has 
read access on cdpx acr" # suffix 00 primary and 01 secondary, and we only use primary # This configured via pipeline variable echo "login to cdpxlinux acr:${CDPX_ACR}" -docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password $CDPX_ACR_APP_SECRET -echo "login to cdpxlinux acr completed: ${CDPX_ACR}" +echo $CDPX_ACR_APP_SECRET | docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password-stdin +if [ $? -eq 0 ]; then + echo "login to cdpxlinux acr: ${CDPX_ACR} completed successfully." +else + echo "-e error login to cdpxlinux acr: ${CDPX_ACR} failed.Please see release task logs." + exit 1 +fi echo "pull agent image from cdpxlinux acr: ${CDPX_ACR}" docker pull ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} -echo "pull image from cdpxlinux acr completed: ${CDPX_ACR}" +if [ $? -eq 0 ]; then + echo "pulling of agent image from cdpxlinux acr: ${CDPX_ACR} completed successfully." +else + echo "-e error pulling of agent image from cdpxlinux acr: ${CDPX_ACR} failed.Please see release task logs." + exit 1 +fi echo "CI Release name is:"$CI_RELEASE imagetag=$CI_RELEASE$CI_IMAGE_TAG_SUFFIX @@ -51,13 +61,29 @@ echo "CI AGENT REPOSITORY NAME : ${CI_AGENT_REPO}" echo "tag linux agent image" docker tag ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} +if [ $? -eq 0 ]; then + echo "tagging of linux agent image completed successfully." +else + echo "-e error tagging of linux agent image failed. Please see release task logs." + exit 1 +fi echo "login ciprod acr":$CI_ACR -docker login $CI_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET -echo "login to ${CI_ACR} acr completed" +echo $ACR_APP_SECRET | docker login $CI_ACR --username $ACR_APP_ID --password-stdin +if [ $? -eq 0 ]; then + echo "login to ciprod acr: ${CI_ACR} completed successfully" +else + echo "-e error login to ciprod acr: ${CI_ACR} failed. Please see release task logs." 
+ exit 1 +fi echo "pushing the image to ciprod acr:${CI_ACR}" docker push ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} -echo "pushing the image to ciprod acr completed" +if [ $? -eq 0 ]; then + echo "pushing of the image to ciprod acr completed successfully" +else + echo "-e error pushing of image to ciprod acr failed. Please see release task logs." + exit 1 +fi echo "end: pull linux agent image from cdpx and push to ciprod acr" diff --git a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh old mode 100755 new mode 100644 index 095a00039..19fe55722 --- a/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh +++ b/.pipelines/pull-from-cdpx-and-push-to-ci-acr-windows-image.sh @@ -34,12 +34,22 @@ echo "end: read appid and appsecret which has read access on cdpx acr" # suffix 00 primary and 01 secondary, and we only use primary # This configured via pipeline variable echo "login to cdpxwindows acr:${CDPX_ACR}" -docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password $CDPX_ACR_APP_SECRET -echo "login to cdpxwindows acr:${CDPX_ACR} completed" +echo $CDPX_ACR_APP_SECRET | docker login $CDPX_ACR --username $CDPX_ACR_APP_ID --password-stdin +if [ $? -eq 0 ]; then + echo "login to cdpxwindows acr: ${CDPX_ACR} completed successfully." +else + echo "-e error login to cdpxwindows acr: ${CDPX_ACR} failed.Please see release task logs." + exit 1 +fi echo "pull image from cdpxwin acr: ${CDPX_ACR}" docker pull ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} -echo "pull image from cdpxwin acr completed: ${CDPX_ACR}" +if [ $? -eq 0 ]; then + echo "pulling of image from cdpxwin acr: ${CDPX_ACR} completed successfully." +else + echo "pulling of image from cdpxwin acr: ${CDPX_ACR} failed. Please see release task logs." 
+ exit 1 +fi echo "CI Release name:"$CI_RELEASE echo "CI Image Tax suffix:"$CI_IMAGE_TAG_SUFFIX @@ -49,13 +59,30 @@ echo "agentimagetag="$imagetag echo "tag windows agent image" docker tag ${CDPX_ACR}/official/${CDPX_REPO_NAME}:${CDPX_AGENT_IMAGE_TAG} ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} +if [ $? -eq 0 ]; then + echo "tagging of windows agent image completed successfully." +else + echo "-e error tagging of windows agent image failed. Please see release task logs." + exit 1 +fi echo "login to ${CI_ACR} acr" -docker login $CI_ACR --username $ACR_APP_ID --password $ACR_APP_SECRET -echo "login to ${CI_ACR} acr completed" +echo $ACR_APP_SECRET | docker login $CI_ACR --username $ACR_APP_ID --password-stdin +if [ $? -eq 0 ]; then + echo "login to acr: ${CI_ACR} completed successfully." +else + echo "login to acr: ${CI_ACR} failed. Please see release task logs." + exit 1 +fi + echo "pushing the image to ciprod acr" docker push ${CI_ACR}/public/azuremonitor/containerinsights/${CI_AGENT_REPO}:${imagetag} -echo "pushing the image to ciprod acr completed" +if [ $? -eq 0 ]; then + echo "pushing the image to ciprod acr completed successfully." +else + echo "pushing the image to ciprod acr failed. Please see release task logs" + exit 1 +fi echo "end: pull windows agent image from cdpx and push to ciprod acr" diff --git a/Documentation/OSMPrivatePreview/Image1.jpg b/Documentation/OSMPrivatePreview/Image1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04cd03ab127c94aec072d56c418da612b934604a GIT binary patch literal 120932 zcmeFZXH=8jw=Wt6K`EkC5f!CN6{Luih)Nfc-h{jgNDYx1N+1f-n}Cp)E=@!Tp?4C2 z00JUXBb|hz^n@Bn2siIJdz^dDe~-P-z4ycYu*X?>)=0*9)|1RR*PL_Bx#n->^w;SE z;F5ukz7F8bnKOX*^cUcC8t@2k_RN`ou77I`=NSHJOpJ^S=a`w8ng8kMFR)%Xf1c$$ zGcyZ23kxe7y)j?7$idEb@t@ECe91qL|ML|6Vmr@#{+|*5@7n2Cz~%D{uNmJkoDl+? 
zy?ln@@|jaCKo9^pbB<2!KLr0E|Px3ILbwy|||a(?CF>gMk2=N|wK3<{2T7a0{D6C0PB_8~n3@pooc zVNr3(r_#@5UutUW>KhuHnp--%u-!eq-}?GTM#sh{aK9#}@Px&s<(1!mR@X>7yLurkU-VFgzaA@*EY<*ax8MP;|S6TWgh ze^wvxV9&}0uKgMZuR8^F*>S|JFR0RPk*^9~Q;+8)2N(s9`@ae>mN&P}RG;~9>&FTH;s1k-`G32URtZtEOWbX{&A`u)2@lUh>Q0#P5*r!OK78sjK%Pni8l8e ziNdzgh0@Kp#Px$%a^fQ9LZBKJIDO+L*$t*R&uJSPzA{fyUTFKw1*`s|zpEwj-Lq?X z{oA+t00)fhDIkv0IjEbt**Lg)n1|4Q0eebLE2Lf{`&S0mVxi@wt~Ug#vrWgmGWomP zF5iNNeqp^Pp$^dBkjg#D1iB3|h5Zs__M5*E$ZQ@8>ar~#xW>n6JK#wnq!gvth*wqt@HH5%*dMy$!~)_t@4yfh)CFVm)FxT^Pi6+ z_sC0{aijOc!#>!{bch|?Z;{F*zaW)%%nQ_6jkWO+hS&pJpT<%fTZaAh4`M=(GPmfpGwd!v9)bs`C`EPy(uVny)tA&|W+R z#7vlU%OVS{D{NmHv=*tPeS81F+R52cpC@MMmLAFvl?7iwVQQ6YDbL{@YBb?MxvpxB z9~g~d+oXKY{NNoAo5ki!$p<-)pTX-oo2!!y60%MK5nc_u^&5UKO#KT><}2r{cD7(8 zrvS#uQ$V;JOyi^o?lLI6D89)GE~A>06G=~J)wL9l^N@ZiHg-Q4zQzfqsK_T2aZa+n z|H3V&&b(C~`}#q`5BsgTVW5HbYypOAj}` zCv&k+U;BK_N;|)M3OM(KY`iGrvpxz(7MVu=wCP$dWKS@XThjV8D zRT058LR$)R}XjgE$F;tsMwdrs`zKVv3I$~mv+ITQ@0L)6y8GL~Aw`}1Z|G6Srp7HC8|gTPuHrk#1!cbS=p@%stz_d^zFl z_yKMMLl+3s!c^N@^5&$ zPZArZwDT%HbkVv_m9F?X-;d+x^5Y&de2#dde)|+4xaFI~aTXRVZ##6cRQkHKjEdXS z+ua9VUYtjweR6u=aF_e5&9LU=zeZAb4!pCd=0)ow7IXmxkae+e;*s3sAwfKW2Z22W z2-KZGdvONV`CO_Wpm`njYq5^jM|6q*f%rs? zOwL|90#kZsca?*t(COnUJP$v($@%-1Ec2+AkVRsVeEYHYljpo6;Dppxs+~F??cx)% zl@iV^(G!Hp^^m({zk5dvKMCeHfQ>r*{zaK@P856jt839NZ8q*a2X%|*a zs?{6WGm^bBG$`GYT9Bo*-K70H`7^2nW<(L5UI$e(H50u~0oX8c(!2Zl(GW$;i8lSv zgH6-TPx{IOidgooiuH)3vvMV2KzN6Gn9v%io<15hDRmG$PH6Ha-o@UZk5M;de3K&h z>yw*w`YB+>XV`JL7wOyi!QDKMy|v6hVRLKK>=Ymxw>u_kX^uDM2me)!JlT%RrBHC7 zQOzp4qY3jv>K<5=(ycbAu{cFDK zejMmvGi{#gdkW|aP#b`dWd#$_l5SU%<25;H{igsfO(Ck>s7Q(cw&%$2NRC%oa>vd# zNTF^`IlpD{O4m76hChPu;6zglBkkvWRb!myJ@Qq7T+80hES&;)Na37bLt_TcGGkM1 z^1JTe!Y|L_zk1h?M+IBaiDxW7snNWIp>m1iyEh!}5;J0*7f@_dzdT6x?suk@(|sAW z#CKEv=AK*oaY69jYaL&ZFuX%i(GclDyRc934?OOfBaD8{)5ymn!uWM#u&JFHh{$5? 
zo3N$~k;S$r_mOT14!zSO@feZkAKFIsK_$i69#3;0opT?TF!0bH)JBPAhe0iCag}|) z4o?A-a2jioKgD(p$R^z;NL1^&KSq;n=cUQCj>?LmN7c^>%o;KpdHZFw+E3lXc3*$N zl7vx3(G5KO8O-n}nw>TcBt8T31ZJBy4DYcscMpWXLtaliHpa^Co9gdtyt$V8fM2Y= zoW?#$oXv=!bDv8N@%CVRJy<^LjzD;QIg@Ad`T1l1WZxPITwXu3JM}v`ouUlWI{Ao} zrp@pS8pKxqq+*Q z)__4s#x&K@Pl=nv>weR1?eQtmj)>tx7s0=Uph6B;`OxERS1 zq9cGV90Gv@9S6rSt2~MEg>h|keu193;?E9YJ4D-#f~7w1y+s{UR<>)QKs=o_RF_7` z!Ik)f$TZWYYI2xg*Jp!KtmZgtMHFv?#04h!y({72TAqI`th99&yV-3K z4m_v&<6}pS&)w$cYq$cz>Tjc=+@BWqHX65`U(K!Dv)H`#umU}=;L%?29S!lCniZ`+ zKt%%=S{$U>$>30uxb?~3VIqI>nQ4PEJ=uibb{pEyvs`Rx)6#1A#+2}~+^gpHUlTjg zf79n0LYq1T5Q(5kT^(}nDd0SgDg>OuSq02bVy1wQownF4nK?*0FBJSkna$`9HtvSr+P#_0>`1~fmmzJvkq)U=OL2|a zD-G>(ko2t4@g?foh5LQU z9mC|j13L}AjfXa{V%>|?n|Jeeo(IWP7W!}TCna%GKC%qbZEgu;VLscSi4iS$*VZsd ztQF#19vC|O_x((0F%j%9QiRtBW$IOwxSA(gxHxUKgTL%r&VL^d8Cy154XEGyL(PW> zkHVc%ze4c9jThbour5C7Epy}s>@S%u|0{pBW!S0QiB&edlJd6>OTQ2IyRTOmQp3E* zF$;BhH_6%MfrZ-(x*L>5V4?#P(FmI{g^81?xAq%w%ahMM_rdsDs{Q-o>t#`@%wGKZ zN2Rn2N(2b+dKmW!D(oCgjTBZ&;=m)`?HN*x&v=;itpxFL>3Va^Ke{Pkpr1G|#wE{Z zarV*kQvmgHQ74?28cMl2>52xNRJ8$##pK}Qo$I!97nf??n1Scr`=Xs)#khx2tK5U)+mThGZ!Y6X=u0yt526k?@_O9XRQ4?dQ^ z&Gv{`d%&N3AWYX*Q`%^YeB z(0>DpSt!ouRM#NP!fmQf@@S0if|F$*R}a~}S~l1EM6_n3O}E*;gSJIp@F*i(Pz~@` zaIzScuDj6gcw-)5@BGx=-gOT)di3E)#`Nf#jjZfmsyvMDe_r!j(zk;j6@_u+NNO>r za!S!y@=4S9p>E1sd@?Kb`8XVZi}*Wcn;ZEdZuAE!DAkcAp|<62e!9q;sGHZuw6q8} zOHi$1Y14UYb0|b+$$WdXxKea>a^R;4DhjS0B1Z8aqulKP5+8#m^YQQ`7+u#`scNTy zcOlptoMYg94P_e67L)oogU{;Oy7Z4!hDK(ewD8wjD~+_<@D6d!^RQs@DS#js3%mrA zoV!Q5h`HY0B8Cc*nsR&nh($-JG1A6FOX7J3+jMT#0}(542qlopg_)=eRURPVx{$_Z zq;oTt_rV$Y1XjE0?9~!yb8ABEN>ujHAG_W!6C}djXcJySWkL6A02!vh^u;O$o}o~^jJ=If>7n`=Ct#7Y6TKHHGs@c zmNd-{u(aB>8V;J>XgyZ;1roJU7gjMBFbkhS_2S`Z-WenD4w>`QGHth^qJt%sMa@=D zUf5Lg__`nZQk+XU44|@cykHfyLoWOjP!ru50*nS;@CR|sJ#zoFMcUlglT%`U>M7Yi z-gRq0{z&HmOOgBMX8;rw>0ENpto`U^I%OL5r!7?>sW#WpF0;{V4phGTLYv#1*MuY< zyEo7DCv7blUEo;a*6>mB5SmtT>EQ(cZ@?{Gk&DKrQp=zu!TO@h2@c{as)m)6}K8+RWuyV^aXu^^rCf^onLijXK= 
za99C-u4uwzeCFiP5;r5kX{5g$Fi*zl`xlb7i{}qCthT7T1|(gKlzdDBFAOom9-owH zN(D+SV0z@DhVaQOFwJT-P94kL^J3W+bW7()tQMT?KS zOhCkvvB%*K-HJG&<%zkaCt{87?sax?Rk&wrKYND+1fSy>8Y1G1o5-c$MF!34lX8y| zqaqOov!<&-Q8tB{nMjo*_B*a0F2n`Jm3+8k`ByN@f0-?sqv?YSFK?<(brcsg~vZef^pBdW^5d; zd`SKLsxeiSUrQWN%^+3>citw8)4eO|SJx0c3eK?yWSyPWT^bC%lDcwiy~B}#yL< zjeYy3R#sx2g@qAots*f_#+Y%$eN?Y8V?FWC|Gpu}rVTd!LY-hjo|L$*qC z^nuFFrEZAeynK%`4<@Y_Zc3dZho6*Y#Xr2;3N0T1Gc%+;9M+kYgUF0F&TX%CtWAf4 z;ps@Yp;WN&I}&IEPF(L5tiwdn?zrdOpjcr+=YOHIi|KJaq}jN}t|`+-J^lT|BR(#l zKgL72x54|)^56`4g!4xc9)Ln1J>y}kKdFe4%~;^uIh_afJ8f3QAeYlUK*eLJEh#k&%LOFC?o#ZH%W886Qj7g-Nvy6!s!Z=plA z!(4avhEJw}h)3T}0Up3#W_YX3!2;J~NH>s0JkE2EHM*th(P#TfrM9F3>!`*I<(qF5 zE(pKv^1F16VxV$Y$ zR~24*T(}*Jxm-+gKoQ|xLluK6Q=-a^DwdYDdQS~C>J*CE^p{rdratw}I!t`DP#USk z5zz?ez*JSpsPl!C3>5YiTbKUYocN(PB_7FW_r%>K=7=RF@4V93u)#puUm;m7b@HmbkPqiwGqZXx`*1Z3k7c8)nA?pdIgjkgev5JNNH+@z;KAbG(5Cx zh%D~1p^93)!9jK_^O!sP^JYd!@&)UlsrGKF3KvJcB(h6ygj%Dwe7)iQ`Tiu`pWLRO zbDFc}3KW=DiQWiUaw9Ygx8_4s#L2*j!0Xj0}S+5#@g18o$V(osazSU&h=SJ>3wTISZ}PL zGYWIsSA7jFB`D_sW0ud>fH0{ z89UJ$->lvI$pPAhT%yW>F1hSWAPztDMj9qZFnW1G#>)%5?Nw|nI%{8)#4qYvXxd+T zxiI;|#q_N)v(}a0>-EReRTa%a_aTrwDBwu}-6lcNzRHgJ@?Aqyck1}IuElZiGr!YLpse~Knb;^}K0lLO*~ z#=*4V+W_84p8i52Gt0*it8wmUBVd(Lh}8DsWN4?Y!z>&%cC>zALzk`v_1sAuP1u+) z9~H((kAyw2e7we``u-=4pXa>ZPF_!Q?#9Lr=tQge_Bt4avTE}@H7d^6vXS|xz7zGT zG{P^Y4J01JXE18zkTs~ds&vaUe8=~*a8XCF~p(e!Vz0{yHe1gskN>_JBk(i)?{1ehs$F%e#5&CIT9~F zwZNk^Mai)XC;_U{DAk^jXi5#7TeoaV9Wbk_N?I9VY_O4$aSCx2GQ0oTB{X82@0rar zu8lb$Sql%0ztk-l2b06kbM|^wq{xIxLHbb7@iV)<3(n$u#LwnpU3E{Y6HzL8iw z#R5|$3b{i0q=IB~TF!l}%kVcXm(pI|kmY`VCE<;CR^_X$W%Rr+Rk3k~QWIo4494Ke z>(Je;dJ^ay)ro}Yfe4W845~=uJ(-_8?r+BzW>B8gROi`jddg*}J&s+9((AbtDOVkT z+jBX_KMl{x3Xna4UPFn>DQ_<2 z?j3;|AJLZ^PZXPbaPOwng1w!VE&}VoPW(|zxGu{B3;kj(TQ@AiT)m52%DWL9H2JN^ zBbo%k`jS)@p%Li2s{u7@S)uJ)WuM3fYbB|vvIM+)sPPZ_D2 zjwtc6^?w_@$h_5OCoI& zGJ@__*dGypTf<|F7OlgCR~w;xygL0oQugY|h&h=-L>lM7QqP~N6!kRk`_7F%LHB5i zRDB|_)0jCh9Hten{-|6_|&{3)jWt*TqH<&eK(zn%qIe7-a3Ni`= 
zEhI;%b0FP=Gve`rY;ISR8m_pjSlXKN?t->n4*tF`;iiC1YsyK>YW5}$bs@6s(-Y8w z!=g2#V27TO*^QU3NbsCISJ&0_PMzU5kq-c$JFa{w+tz_)Dc8}qyW#c|t*c9shO-}3 z>kZ++=&i}s2DIl<_#&1jOyuYW#eps%_cuWnn2|7k*W-~@h3To#xHZ$PG)c{GjB~d4 z72k-&VZSZZ%68iu#{s#hV4_OrA?GAjuZc9U9(HEJpX}u&?YB((-c;9+WDSwYnB!*m z+?1WI4Vw8I4R3G#(@R&pNtjWohtWq@WQ7~=%19^m7uL(O;ARB3DR?GE>*kmDs;f46 z{NytWz;o6fye%r>FyZ1Ua@5_6B5XRuFOJ6J2Vi?P#y`#S!O^=OR)a5k$wQnwm zL3VAAf)&d`kg6WU`5*nPrvPxUKc?>&S-Zr&1@zW)+Jws#0!VU3eBD5>l(4^M;`e0LnFb{j9UtIA5X*dx!Pk>JgTM;7% z-`!7+-Sl)G7pEu{8ZxrDr14K_8kcG)^xw?@9OE{pNjK*4;u}ST`Af5pWU%*r>;xr+ z81x4HS%jAM&Sm-peeLV$ks_rLI1=`g6SGc!G*`~@WH?WIu)xb3lD`y|{VB@#(HBYe zfjbj%?_5`Y>Ep24K4nS)Y0%~P8Vquhid)p>CF~@Oe7OBm{OQFU`=Z-YjjH97ntg&G z+D{PC()haD4Y}xtM4fsMZ<&M+)V5PiV=C`dP-NKC+{|c=4~MAHI2sHr8vn3%XHnI0 zpn0ta^Knhx=Gz@rdvzZ5>q-I?!@5+TMzMqafQ^7aj_#l+f3rKsvo=q;PY`nIIeF-j zZOhFNxh6aW;|5|}g|oC-EP5n*YT@%4_2InDP|@Nwzk_NQyZ6J~yB9kX6Td(ccxL$Z zN1Dh(_;PPN!^x)_RI>A>KQe5b1}$%uN9@nCl$}REeoD!ACo=gw018u`Y8&fLko%{A zw+dynA@V38l&fd`{EtcLkCF2h2@E?QT!xVo4>+jUBW9>(5QHRdL;M|~l+Dgbvj>`; zq{CD{2YPuNZAK0qJeqYkbODFKy8jwtN_NGmN1gLJ<2)6R-y z3izlv(?4)+^JM7?dWmJ#_|OBFv6H&_DtY97CrJ*A#`|sR160obJS%Ern*@3?sxGi7cx|0m^EUBSedrh4bxj#G z?jSVlp6#@0CP%ghMC7uO7_pTJ z*Zc-7eP#O_Ba?6#nx~cw#Y19%_0eBLC2@r4_%{6lax@+jD`)jQx*MuIl^nO^v3bK~ z63P~1Fb@`qpIgWBRSpoD)XqEwRT4f z(+1lblKQ`|1#BS{g5=vLU2HOJy7Qde9v}j8@)>`fN1p(9;+CJY*4Z zyN4f(8>k^}En^g~sAfl{1>6R%G^})+XLll%#-`zyd%Hb`&dbHvS?8=F0!&r~J&Y9HH`N6 z2abH%E4jUyC#6b8A$X(w*ftqFGY);rntZo6kEQLWrxNN?Ux@dOQjPgc*}tbEr~8!W zGbTXueZr3$b6|P4=h0PZWGDQ(+daQ+x60!q+oi4M#e>3<)xa{l<-qlZ+$^_HiZ*>o zb<-3kcMt_S!-RfPUl33+(5$3#Xa#w_>wu=%Mhrknr+kd_km5ECPRdce-bT zd+4=&Lh?RvV|!6Xm>~ES#I&AuZUPxv=sqP=GM?B|E@c=hEyMD`Z8la;R=zp@TEo+) zz83vBdKNuuapbyj5Wu>D%U?JJaOl2jy`Ii(l}{U)uXBi{=i~D5%j=-F-AduAs)k)l z!3KN%rs7vtosa*vbV>PU{y0EG-u6Y+zBiBK!vx4}H4*1n>`2#0dhk$&yp?Awsc2C< zzmxsOPgcW35A6N1Pos~AWv+ylTQ>CsR0ise!A1<`MO<0AehRXY1xNz~rJ+r}Vlt@v zYTaSh{t_Jsl@{ni5t68AQ*upF%xIi|>J_XsrfPBD`UI7AhG&S!{`9c5cXv2LlJ@Nq 
zLo8yFZ>ZI9TuZ6FF5YG9<{5tesBm=Ai)sj!p!*Grbqtdxn=Oov?(1u9nrM53G?3kI z_uAB*s`p56^mTlfs5@}Ya7X8j#C82+JxX zt;3+!mxfUL^D(x%QEe7Qe-H>A88eVwRMneD!r5=nG9%JWnZ|GK==k;+Y2#6`Q{r(i zM%1&LrMK$WZ6Kb0Ck9jYDkgbX;-parhLK7B=baLimjqzgA1Z9rzJfsJF3d%shv3`z z;=tW*2Z=TkQ#8^&mE8qlYGHsuML0Gut<}PN^vGX$3p7eVo znJ~n;PE#6#X$KMKEznu`Hv2D+gbgh05x!(Dptkq`}>oFkL=w;PqevBFYgG_v7m5HnhI^i_7o7Ej8)rY)ei?< z>Vwdu%)eI7RZUDAM=}_)Rx{n?j}^(-yJl4kb0QjJ<)Y~Ib(jS~3KTyll=oX}cx(TP zBjWK$Tl0-84%(L8Rrf?6+*4X;2}2YFYOIjJy>Xhi1L0WP#$R+}oe!GOTKBPyFju7Vyq$ zSH2wdX%K4EJ5Y3U+}7Z7iNI`#$X-0`2Q}$fW_x&s+RL1XsPg;L++Tro*|eR|_bK-+ zcRy&f=de0?7bX#yPjHD)w?743u&kTfUx`O`yFmO7-cPHfiKUm?Z%A=|$Z)+(+q z%3HXRo>6V=T&OvH%U`pI$D&bWsd9EM)dO12(nrE2*3C!Fv5v#EDrS&*X@ygUAy75H zB#E)wYyRR4Z&bzOCQxE0O~_B#2aY60QgLcm$#b`3b44nHWR z(;07sefp{^@K!1;k>w>LBklIhrQ+r_;dB4AQP;0)|Rsl@EK_RQo@fwg{JKszDJNBW)oX| zj&vkTdHyV0%OMFoQpy!`*{x!mVTfSyS>%o0+^j`(S(Lw5T8r&QVh2M)X<y{iC^hN$kL&Yq-e%B;pfk!>yMv1MtMs{&VxyzTQGB%#=OS8+)vMw^$U^!}KZ?K~!mY6yV zyLta2cm3I|JvUnw?`4y5!#^z&g=nYa*Tt`kaL_tlQW|o@0rK~WF2{bnSY=x4vca)^tBS|8(_DL{KSHqHIHBJ7#yVP=IFU^^ zX59`5hkCI6x7u2nAlN|Lth=RLE>8X{IkCv1>s>%>?uYVFW;_SqY8a5%9#vmBKed+i zwCwE6l>M~c4XK!4P6{bwR&U*cnQ|%CFvC{jhDr+z#7t!Xmuf^I3Z8$6#wrYYrhVPj z=bg>?Szz@3R+lo)u}%orPQKd_Y}|!nwf>XEo9j}%de!rxfSsZBJMRbWuen1~^|$oJ z)9fdwI(sgap7$Bkcg3WaS3)O>W+z3Fd0z&iMKUCPbM=R^vLRN~zR4Zg4-;kfjU^LF z+oo|6s9epMWtsxjqJ}I-+@gTmqC&dPB(WMwd1vkml-a)wpSm@>ls?Q9c&>&HO!_?` zc#$Rpdz>)uPRBGl5;f))dLI*e(w&2N-rVGg^N9ae5|q#AoN|jn^UNBcf2mSyBuEAs1~`Ez0&46SDr35GGc5QOXVUNcY`jXMOHO0k(Pg? 
zRoakKK1FxYBu4SdOgLUjne|aps6%$2H0c>bNS3x$v zNkycZ6*ZfKq*<|sqwgd&-@soLK1LAR=hh_+vPP;x;k6);&UB~!y%e85zGDY;#F1l+6b z>^B7+_L-cstxLy6=a^-0{ zGdh59_}hE$dEMdBkey$BTJXIO&Hgk3qTH)wb0;bB?v7=vid5=L_e{F&Ex;6ZsJY4 zHn)1L)2f_$OnvXcLVC3Fa8<&OT{`Y(O@Dz%7o!_Ps6K_)9Xb*S?X`W9HCIZ|;OL~u zqv;S@-Dj;K>zUF;NoR*O1~O6?I(!b5X-)|vg2ETS_hXwv=6&IQA3%ihHq^8oqV-DO ze!wYU1bGT5#&m^(H|WY$AH8W@8rC<*OWZ=1xmiuMISApnwBiI5)K3ALh8~%P=Zjxt z&Yg=nYyRq)A%H!&j`#aSu$LWn&q|bKeZeRof zLA?ddyE}HZ>&Xc0c99Lpc3Q6i!T0L6S0y3+laQ2+uaLBXHGuEEM=>!Oi{Ycnk?HtJ z6#`q!t7*mReM2JZ%5fmj# zhVv$tcz`(SwUDkYe(%rwt=AsdW!uPHyLmFw$3FA*esW5`@|Clt0tfF%j5LX#XztY= zJHx`jBJ2&x5xK%q5vM%DFgVw1+ayb2;9aAPY!UF@P&3PiYJ*EA(bCOnb&mX6 z<@GXrcjFSXTT=tlXE%l}`mN!guPkf2Hq{3im}CV!y^ppKmnw1QMF~jR-xz&fnyjf3 zXopXZr%HWXZ>bBo+L(wEsTu}P-M%*WJtdQ$zNhhkJM_0?Nv8Sr?;rR5iI`3M^1x9# z4l@}oeG2I0C;sl4yOZ6iY?;LuQI8bb)`{wRf|C(VHqFviu{trLfG9V5BK{sjImCBx|jacN@x+ z_Oy^-ZIa?(k*pRcrGG`5>5M<)j?~KbTfw?9K`x-{AQYRr-pG4{V;s%%Crqra2Hhq< z{Y$Xk{pA7Zc8qx-iN)X-qi2FVrG9C?W+|7ULa;v%d zSfx<5{?y3B#3zQ1>sGFX6PZE>W$?7x3lAm&u5(DVI2yV~}$N{N-Aj+parrd4ap{ zm8;e^_U3g4kEED&SpIxls}QWsm^IG*oQb!L2VX@xPuceNU8$Yoeq~H_J=w{yl#7hV~OdC5gc;LjGV6eJP(t zTQ)R48L3m1G?MwS%(LSI3X7jO{4?A~{(bO=>_Xh_zFL_MuW&!)z#m@rP-$_A+Sf2M z{FyJ12|*!|OZ9b5rq{7M1&RFj2W4!*sB2+AHMz;5P<*+^=EIe>^&U>o;mtkd{oj^( z`*$KsFHiLf%d}h&o>x1=85G-$^Z7l1d0~9b^#}_cCTrO-+vG2Qo~l;4LbJ6*1>Q1qIW~o0x#; zDSFzZDd`I0+4R?^0)zbp>;+Bp^_rUOf=rp3-$Z7}jOQIXqca@yYUrA(sV z6j?N9xvl+k#FdT{{kMa(|9`*p*qKep_}O{OEMt|cYdtxn-ZAcYr_DzkJ!9sen8D7) zALNkPQGqwWk5_LaV}K*wm?`x%9;_LvuNivwB_Od23G?-)qisY};YVNSNa1FeQ$Wl5 zHV4^w4|vSFiXmUhU(}_F9}AwG+ncAJHzuJ<`|b_g>z8QL1lgss?So=bW;S^H3vxaL0UqP+3y_B%2_18XlwA)@2@Vj^8zd1KN-hMn+f}%qF(E5M;z29 z_hTNf3)8qxIB4O=5E`R6nV$1rphceov_e%*0cW|W9&CqkzE6IqQ4CN((WB5`N1Y`Vj;Y_jIClx`{G$&UGp}-n0H_FHs>BYSfr9GGvlE zesybv-+vJTm|BRbolxoIr^Q2hwwz$NnQ26<}1D~ zxk(IuzQc~)6xV@1O9Z_``nSPdy9)ngOk7^O!p59aEfjl)KYB2~Ez}4+4O1hYe6YRx zsKAvZ9>w=6q0ISeqA0BBqRdy&OtQp>PRC`@VDG2&-24>q=oIh~O;0A@UZ5$$|AiL% z*>RLi#4+ViRmOvklQbsS-`^TCA-^E*&_-xBzgxn8cV+nW`iksot8c9hzaJF&*(IDd 
zpl(v#_e=*9r+Frjt5=X6sm_lKlNYlwf>SRyh#Ep4U3~{QcP8xF zy>5?x{q28|gP;qd^ay;&Y_~6APvs%{)8$552%?JOGuwC@241NH#Sjc`FVEeq0KJl0 z_GDy}ongBu31p+P5Ou?;e5GWFE)g2(m(StGXBhgTWYX=9mb3HgnxJyUXI~ig8T0Qs z1EPMbuF(IH;C~grATj?Be2;GwPP_%|iEo`p9eG|?4ZfC~gmIF4H_xpkFg?UckLsWV zsdj^9If(ceRmBA=I<|Tx);Xm(0pXv($ma7f!R4A&!N0FA(@(Pei@g7ZX8|d>_0r{! za8Mb_!G(0Vz*|bn$Ifw#DV_rO<^s)Vmkr~~KZ5<%Lz-PuLjoFRFs@dK2Hn!@yE;d^ z#!r0d4(mVve!myq7I*eFkTUzW#fL^3S3O*Q{qBC@Mz~FpY<1lOdyd+E%3JkUg-SahL)r_C&8_aqD^LJ?%E$95U%m*v;n7u^pZH&ly6As4!7+lo zk~fdN`-o54Y)uu}-8$GH@%-m*&lHq{?*io4E?&HgLjV=qrbKMS4@I}>Md{evJt2;# zm+WMo;k>Nq9eosWX=XPqubzJJ>Obepabjh<-0u$T3`GT|Mm7#Rbl@fOg;SOMMRx4f zrz~T^F2aLCo2d_n3sW~fGIs#Vm6INh{Xs)^w5R%pS-G`&XGy4!U4 zhn38i57>+Oox_pB)kw&H=)?-#8WH%$*`Lr??K~;};&EL6owc1m!Y%Sqxo{37L_BRA zZybA>?DE*&>kwP*2Aw)Ht~v18p~IVfy8BLZWQMBdut=T_d&5J6Dzhr<+LI)-w9RZF z(+d*3Gg-VV8`>~3NoAJe=Rt<`Kba>f91S6U=xBO{mj99a4~a#6JO$X^y`eYi6R)p? zybqrIlvgR!`e|#rwN*3Z#Bv~L*+fUbAunB0)Nj=Gwn)WDjX{5Z?{&&O823aN`xLna zaw#xr0DIhNvgm=mnsCmUdp72ZF9AH-&2LnUdhUsU*@?Kdb!rwmgwxzM0jR(#F4tlPbEuxn#Ka%A9X_SHSh|JZ17S;4Q`gGzhg zlOej~p^Bl;1IWPqE?ncWnQQR7DVeqD|BJmh4`;hu_r`VBs;a8FL~CqKHP_t9Tuo7P zNlT4ssG5S1wx*hkqKy)zsJTkaqzGE7YD@_tVyGb`#vs!7`<#9DwV%Dud-gg1{N8iD zzjyzU%Y|fReb>6zz3%Drxoh&AY=2&poAHVC5WCca!GA+sKB*Ug{@*dM7 zR(XwfhJ8)if)>zK_AW1gj$-0MuWP&ECeJs29LTsaI5$#pCSb7ihA7{KpE2#qKIv}b zl-UBCDQn>B|GT^QKb!;=y!DliyWs_6Myu|fYMZmh^FlIcw%eY$rkGmgeCc3cHD||I zpoILtxUJSXZs99*=}OcS1X94e(xu?UcWN|B1e@DST1HYW!B{RT)wf3SEUR!7`1SlM`-fs}Cg@mDEj6S}(yWVDb&7 z`Hf0UZUi5-;xpycswp)wvXCCu#@uqypJuzVC6?NKEwF2fKKqh&_Q((P;!#P)AO z4dpj_%_k^wlmdtT@tcd5{n3xFU1)+>?WZung#?1fE=S@5o zVT*#PT8hhjeCxN$Sq}NISbzL4<{J1w>6|{lUFu+!vGG1dY?!fq#V05LbY|5SI-$nv zg^#;EX@74fjGNh~$~-~~?Tf{fxtY@f<5y)9QWx>97wk=(T3dRd-h7;`JhSFkkNkY< zF#aq_uk`O8?C*|8sRAD=ZNATPf@$)}f4S_>g1Z&7g4~_5hn>re%9fy?DvzE z2lxAhDstCW0SSK-DAe%P1M@3Cj_4iAVL5Q;zq+j1^j!A+_w@+@;CB^wn~X?}9DMY! 
zZR6obWf&nPvq;KHnV0Apy+77cF`HH7<+bAf{Hbm$`XKtp-{=`V>2dy6+QP!PMm=co zV5%<`=}O(WVk3hv0j@kRunS-3h1gjykyknAIIeVmMsjv|GvxNOWf`1gO*GFEYNU|N zA>e4AsXAQIre6~<8!i}PBYN=41(^dP2bZtPtz0c{{Igy2A6$zcMW|^{y^iXNMzdJ& z5hID!kyXdSEbzs4iL)Vy)8@`L`$kxlT;!f6Lzh~MTUr(L$UaI9 z$RcZ-Ie4q=Iq-}L-Cow2XfsK>CLNCPO;;b&mVP(PxR}4RoBbvPS@6?w(!B~ZlBM0z zU?dw+CWsrjiQ^J{J(J8Pcij5HkK{Z5g$81~net1phG@ao1*$@}V`C=O2er(mAqU{{ zI34kRt;#~r-Ru}xVCaL{FAtNxqHf9y+g(|gDZn|S@31;8S?^&LQjRe!3b z5uaAM!=mtE_RK8<7`w-QI7^i&2 zoM<{S*Dw2i$d5i4fQy&hb3_n!%#yPaoq*!|7^v>Cc3DX3a7>moxf zrzRsP%-=pvsStNRoBCajy{BNJOfCCx!b8W{LS;H0tNxDu3A-RjKqfiZs-3_KkE|!Ws)(jN+9Cr$LE_o_7oAS^kmw> z_Kxfe^pL60kPbWRv@KaTvma$%7&hIDJsq>(e?2r!G?5z-G!nayxAY~IX>3EPhh0B@ z%D6j7N}n8|O7^$l&q%$xZK%Le z-@A%j#nwz6{>fF7!yf$4tylL$|DDT*1)uxr zpPizoD!=|UiGOpj|Fsf-4K@60CH~Wuuu~O?5XjH%gDLyA?GVHs@!KCeb~q&bDi?sm zV)@tWzrO}nUhk$ep}d!lr*++FK1RL+ueE_*NoV~od@Wrk?p~{Uc>|xnc&}(m%lVEq z+%Jwq>;e#Zxq#gK04U)lW)^Y{NY>lc?@E^}4(*-8+7^xf(j{s9W+{X);(oJ?0QPdl zE)b^sg5CF=|CI;4=h%p6pawui`j>Hjv;5;gQo9(P)=gB__wYY`^v^?cKklDpED39#&^SV-wy4hQ)N;A1K9@b>TT=}?{Ahezz&6MEd6FlM(z&x z^&1QqZE=(it^AW=%r-qk!OvX4x_>5_ljb>KVy{@MHh$#6#hOM3n|o))>ZHYLQ?a^8>*GN(M? 
z-xMPgIG*DkyYDsL2F0u=76->x-V@JSPUngdz4Ble4}AcaUnZvoU-_=h5j9S{^yO6@>yaS7F#M`l+8H?|BZYuaaKmcUZOrj3Nylo1>+ zQ?4xBo5!ujp{YHSa`d7HR3N*CKDAMV;3lC@2+8Obgho@OLq1FrLQz2?og(oP&X(Yb z>XdUj0r&yk(_sQO48u@XqDdexLv*)VpcnhHyK(Nt6-z`>H)3Ao{>F^85KVB)fJ(%z zo`E;s#Ws&j*U_@llO6=utlum;8|*TKzr)wAZ|E8{t~34l!@VQw5*IBVM+^+C!-B6| z3-g2^U+1exWRCR5?iAJhW?8dG8m?;xl8<#|>2`%_Q}Z$NZMNP`{O|1H1+rGAymxHm z-_`M&2)`X^qgFo&N`I0XAmLM8AZsMakLJXBha#=Xee@K8>k&nYw4{fWGJO3Y50s06W;CanKqWC)!Wjd zx03M;p*^-mC#>KQ*F=`8v-ZLkY+ ztVtzrtY>=l;I-)qJ-O9{1oYEQH0QX7LbL zTR`(L*%rf&4y|lj4q)sL6oP$(K0|w-9%%gbQs*8y{r;0DrcMm={Y+V=*dL?=sz9w< zV8rU+Z5RNp*gFG#=-~9_i`eN~uFtJZOCJ)%8}%;jRKy(PgQrPKEVyS++7oEA&8zv_ z!|FNHWqpG*0y2%_)riS+G?*fel}raxILPdRrc`C&qtxN@O6s5i4L`O?5G*sIczL_j zK|b;2W)p+CUxcj(zwSQwKJ&8)5Oqg-_MKx6C&OV{9K4YTx%oB$NGyhs1-)HAeBMJo zzB^sD=iC+!raG9OHTv-+vH^xmGNdc)U4(~BIhGYndZ?j>aVSWHGV#bwd5OfUBLVD6 zH$-6xMz7xxece3TNY-z6Bmv~OYYY&7_{lh5h*1C0qU_k03l9cy^p()P?^0(j7LDEz ztyZo5%`zqzf1>J38`OqJ&lU}xp>SJ{fiqkuu+)odj8YQPG`rX?B*ok$H=*U%I)Y$c zTVXmZbYp9SW;y?id2Y00Ds)eDe-%|OS^Z_QSlVR z;p^v~JKPE3SSHh?yU@H%x+H+84;rk4&iRG6^B?Ek3JivAp^LMw3$~y1?K;w3(&3@3 z&{Ui%m3A}vVXZ_x7(ixJ9s-^oV0lz;0++*YbWR=|6svUbZVx;?{zbIzKsk&JLim=! zGpCpE3?($1ldx9yb{8Gd!u6FD8fY>kL9>p-ys4%rI%oigSwH+vkgJj1ihGK3Qr{8d zq69Nep=2{_mP=-@V0|sMl;deoII&uCKaL4R zi~55Td_kv?-p$^hf3sB2J@050hY+QdKLbLo}sjjoFWeD%*|c=e)dbJU)8sbCT*U zQvJk0rSd9W!Y$md?~KQ`4?S<1W6L#pdXv_P&K0^1S|-Sv^ai=OfJH;ZDnzTK8)4&! 
zDq&MSag{5+9%nZ<=1uo$bL-7D6CQSEo?e>OWi!K?$OR)r%Y54}Y>hPz8JjIbPbGc8 zK%!d?(=7tD51x&@$B=uYl(py$e)b{kl30@ z85&0{!ReZz87aSzZK_B-ab`p!5x^fjd5UP<)|Hg;Ad#h(w8x!VJi33gsQGWJ zU|yCO%kE`5FfZEAlrlCKSNamLE`3P=qXN>c?|B$#L>8cDn?R@!wa9vA3t*pHJRa+b zql7IUX^g$vbJ9uuy`_Spx{EmhuW676Z``8NGo7jJ9caFdUf$31B*}0ULYZRdrFn`0 zWT;G5-ZZOs#y9%(O~UY~h&%dHp5npr;0@5niik4i$)9jOPaRR_*FnZTEw^)>)1YdV zsiE)rv*~L!J37AHoj(XyCeWzLO)OoK<2k19%wqi5d_oR2s<8whgz^kK5pAJI-Fg@7 z!lhNg`^xpi+A~pHs?6bK2J0*J*=G6$3Oq<|~!kRCfAmp!+X7G!E;Ukj+s72_M50WN4m-4|dcFE72SaeI@dWA$(17S6oy1YpcT15+fq-M6cVv zIFgk$@p?PC-sphPwJ+~WCWQwR-xW!ME(m?|={$1AQ=yTy@S?3y+I{%!m&Bg|;xYTu zVO0o=!p!~4Y5efSDQyGfI>piQ=&M1u5(hQY_fJ&Ty%}fO+IJx_Kq0tmpDjHI$o<}K z1D|!spstQq?pch z_$?zYsYF|G3~D&yX_0uq?HSmmz#}v9;fuGmy1P;XFa|wD{t9K};RP^BR*>W!#RS

dj+R1b zA;+g4mua>mCq)w}51(a9IsD`kxM1>-1FLrzr#vf{DYD&kRUm$gz%&S#~? zg>A@OeSR!mA+-l?zoi-1p3OSMkk-Y?zd&_Gly8J2oaoVSsjIyFYipp>>EK0Yj7iKb zAE6gs`Dm7dftM#73rXNpH>QBS_GmDzGTs_bWu}$EpkIfcJ%xy*u=~QtN0jjYK%6q6=3n}i)L_7bDZ?FszrTErQaso z)>qrQ#J^7{zU@|b<^Z3V^ck+h_d_XNs9rqQIg8ighbbod1;R3Ck(1s)?yLzei?1p3 zFC=~!H#!VeK6AaFZ!S%>hl{%NeTvUTUoqkCZ0%ycAfi5K-=X4DP30>~)rP&E)%f2m zTPxs{u)sBK@2y>$3qU$gfigKK1b(wv{LoUHwx*7cX;-u-aeAuReSAJ;KQZ1_@)`S} zR*LCiW2b)2=IDa6hhgk|92N?WMH#fM#)c_bWQpu=JakE+9VIDjr+uE3*&liOEG6V)4ql=ji122G^<&b2fHo7*a_{w}6Jjsl z$U4q})STY%Ov2L==RUhV8SsRBLPq<8qp~Hbr5O!B1{u~B*cU4^wz=zhbP@;gu~GfN-c>r5 zgpSt=0&FtjCG06#hynGRqvXIQ22($%yvH|#f}gaGlf~uZo*3}2n;SAd&#;n3+&aRf z0C??oC-RxF#t*IQG{`-=8!r1~W9^VdYePju*x-?ode4I@QSVo;NNuf!ZQko74Y8b1 z300$cZ&^^cJHecbW99ilWp+eQluAwFC)-|u&}aZ~4y&z?^Ml2Z(a`6F)R{YX;^(td zxA^-paUf2l7rnj9QGBS$QZ+#*ugH^EHBKub-%+sfPI5c!9OuPHqNP5UJ|+A-qx5B% zwYw+N51`Rg#WPKyn^@~Ww_a`)@d@OD0+wmoyDMfz_u0D&&+8y!gfS!sYinv!G^~25 zF#^yUR=@KatQDd5K4t8kKa=}_3O61wnt~WU9P;2Vn(dXoO=}uz=^CqbH zpuee{MxC(D3w-ENFZvL+ z_UJfWpV%%C9nO#~S>(+?&7e9cn2y=;KMDt^DBH7*U-Cs?K8+887}4XV zkaU}DUJbXcmil+Di(~VYU_Jb3d336lZ%_yo87P@C`=xFtFVIm5CCa>(zp2!sj_~Zq zx?!UZ+R zJ{)O4T`43_wWhcHL~OhaAcp6tZz zNV13gScR`N$tmIC4s7Idn+o4}N8b;o;(jVZlZ+f~ze#@(7*b}C<9;#Uz}x3!KU+!ll&jV@9+>14 z$^ME%HeUL85tr?1az)*urUB}!v5p#6SKU>8TSV-y>x5{+8afm5uAd&wUre1%aV#?G zWLoRE(%dMLWYDP6Rx!ZKLOi!H3Bosl3*n+|C|In!f`fruv59{%&=P*Rf#ppv+Mr5Zu=ZS5gH>9HI~p^m&&O&)f7>4VF=sGTUA zSZQwq7yp-!BRI_rfQV5qRkbjX9EawF0$7#MRh;&vE|cLEOsK8hqfz5E!gyQ10Vr9q z@u;xl&(;LPt0rC>fG4ASc!hb&Zc=BX8$n@+QrMhZOz!!bPr`{l>p;^2=^I;7`-M8na7_bx$5hWw6Z?C5R6D7x zFAPotSk!Sa=3BJc$*_of{tWZdl(nJoXB-M?*lv*M7Qhr zNcJSUW^?ad`Ph{@;}uhrnv@yF*do}sXzZso$|WOR@*C!$Vr_D&jZLryUm8;Yl}#u2sWS7bp43bE;EJ zJmQr~8*%2#kHyU)Gw1h@5}C(`b3B-=RQ)UuGBze4c(wh*Earz6_*9;!N{FU9CuiyF zOlNP|mY=zTE|Ezqt>2d`DG1#?nT23oT;DNx%PLM-8V^`tM-~L461;tj9P}=>ByQQb zpp3R~^^bpuK;)e+rKpnmD6od{eYuS_fB>%=+=#bGvr~R$jZ4ALRctTfT?(?o5VS&o zy(zF=hmZb-Z0P45y$)9*Stp56CpJqu{2J2BoT}{g2W~RGGfq~0R 
zq~*@0=`Fu%kXjEOJJ-`chE&V1@Gl(~_&}bt+SslS3qdr?Fzzt>g+=!>5eBq%s~^fs zZqaw>%|eMKWsV-T!C4uRlv6PeDWN_v&zl3C{Mz2qBS-hzv(*L) z=-%WK=ffGn9V3|~s+kW$JlSB`-5$4+OYWcS(t7s9hCjn=da3W{$N=7q=CB>z6i7SN zx|>zQQH2mP;gzny!0H2OJE9gyWnwNjeK1 zGoPT0lPxoFAE!jnfi4S#`6YDlX>k0TPmBNEag>(S)B(C$TS}sW0JJ0|#N|o=L+< z3d5ANFL%=fDrhTO*Mu@`y?FLEUwrNP6X?8Z?8h*u4xj__baSGiczt3k@BwZtyjxtPEq>LER%u z{$9!WSSpc{MdRpd;iCsS=Mm_R9Gvuu&fVJZRd0;LA@f1XfToEw;q@vn7L+G(S2La@FHCaoCm=LQLMH8jdA79Mc_Ab;7ZF|~9k=vlE@fqFZ6w6DX>c}a zf>M{9#^p6|_T+_#GC$Is>-6*}Opvp$9bXg~BWD(BGAz zPq~jpl}5`q3lOB%6@V};SmJCCrRHrXm(8J$-oXn)i3>QI#7!j%T9s%fUr>0D%f z*>30xZ+ZZql%=5)Rf?3^9oyT311hy60(LDXul{n5A=N&c8CTU8ShO|%@O{ceVYY&j z7wM%d5C>$K>a7T?(wM^TV|wx~GS|VRIS8FYsh{!*^P5HeEP`wADnp;TRYmRG>vC>A zc8#8Y{Ds|uRTTkDic2o&`4G0XXgeV{ebHNy$L7A2G3d2xT?H#ZZhlh*!~qY-B>)&B zrA`6%=x~1!pY0?Z**aE};C*4#=wLMu-a5tTXwkXcIbFHAp<+a4EG60J7vSUTIk{* zZmoh@ORK;->Ss4V9AxDlDMeSs6}4n+HV~i=4l4c?W#kx|B>9gPccVQd?ygvbw_3PL zNP;7jUDITMp-Y-DHd4+cG)BA-%a_Q8UB7~Rv)|iv#%T7msD%jY>Ql5(m?Q)6Nvemd z2IoTA%m(H|eJ!bSjU+)+3vgCG?_>KhNO}h4ajE(#F?bT1XQ~N*lUUi?QVF;=)V>#5 zNE%y*fCJ8IAwNrhy2DFLUA!qywz;G9hh2v#_-qW9Os%Y@tN2$#Z|FZP#X3lkK58+y`;I&0!f3|B)o?01Vc@ zP>J_v)i8lpv!qw3H@fuDVLw;bU7Kyog$`AV)zTBrlR%&S>UHG3AQryEHfK2d;JJ=E zDo8X2KE9Ov)`5+jU73S8OOwRu3jL%b+nJz2zM1+^vu&Rb#^6tuI=kR7ViK=jYqNmiiNn6)GO90|Hp84w|a zE-7g&`t~!eKvLfsfM=x|61AdbTmJJW^bXPJJ^Z;d;$-BG7LexGw{VR$ z$!m{B6ZeWq-2D38-CAFRm3P8cyzVwgy)(A<0teY*?UkAP;)|pF`=u*PL4@Z3;}CiO zLMPFRCfg$s8AMv@qX1G6z+Emwm7{!AXaURFKkT&g|H4ff{>IRvtC1iPYo6wC->ZYl zaS5)Z@=d|l=MR$ayor%8%2yAv&t^+~_Vfyiwdrl%JH|jp0y4aTeRykmW;}q2ErBi~ z@M;v0+Ok5~qJU&4k`vj{;y%+ztxZzlO1)5Ao%DY3WJyxZg`zdFPOm|>s5<@i5Xfko zw=lp-bu=Gnb8`WRJ`3C~;lxn8!rvCRWZk#D=dj3fGIJ;Px%0Tyq;_S}B>$Uc3%egO z2qX9O-GsD!24^pKcZctZ?l_s-pY%jdvW_PfcL$e9i5@;ttM{Db01iJ_lQK#Vxr2(Ig$7768NrqVN7@NdQsfAir7q zI2fVG&c+kc-erAXx)xuq10i<0Ud3;g`DmsN762Sy04mtlf!YBOgjU`bt5d5hvin}^ zyDpOx`kQ43xR#PC{y++|0{WT(R19?C8PJcpNx)b-MI1^%VkpAyHwzPvBrp8|m6b*Q zX3^56Z339xWvRR+-!+cn8{fzPfRqR5u=>B9melSzBmDpJS=kM?)^e$>8snOGzV1us z4sQI~=Hc~#V 
z?uA;N4^su-@4$)seyL0AYlc5~lc*bQj)@u$6U|MP~Z z{4_HdnV$jOjehgb2Q%BO-#IpXe$EGg=KoD*lKvzn4v}YpFTA#C81xqTXNk%CvzXu? z=z;9fFGK$|tN*#RBDdj*|9+ZF#sZ)Jtb;}$FwnFvVnB0qUsYPTL)i|iN~N>5iJU0D z%tE=H8#||AxKd`mb?97Ny{T{QBpXUq;^-Oq`(5lz?m;st& zkGl>7GB87S+=~2O>D&QyNU26y6guDh0h^u29>GekE`njEytqn-NrK#IHI9Zg{$`QEf);Ey-5=c+wFZe#1Q1>G7z_g~qHEEg{pp!g?8s?w zq%*ZECO^ZVf5RlfY09wdzLoP~kPdDp{a5{5z5Hm%)+apnq3$Se+Xz!Qnlb_r)Cwx; zr4Dp2$T!_-IZYiim>2WCRv5Ic)O20-H;aDETGS)WE}+Qr|&iuwrxqFqp&ApT8=5DZRiZFtG`mp| z{58DGSVJ|RyNH@z+W&e*&W}8k%2#)?(JUv}7NKOq;_ z8fXPY2^Jg|bk5A+TrqX_bral{^}fDaCz39O1fnCO-RtDUxakCWlK)mEz>ct)6zA92 zt2jWnTdcb0XmUUiw=FmwFB5{uS#EtGIFS#bIb8>kQ2{}7 z_!7zSP2M8Fvi2~UU{eR}8Ahqp-!!zk`vQlkvpmTDUgW!Rh7zJAu$OKguu#~N8SL+9 ztcbEo9lbJv9JfSTei?8~8s4q#L=tY8(>3=D8Jg7DiGtS%>1oj6C8qR9;b}ws%bF~q z(%6rjB6r@KO5Bj5+dOA0xB}5al10pP1T+`A;3n$Tzbf6yT@DHG4BmfBHr+MX(bz{QRhnu9|Inq6U zz%#9)){Z}tyXpC(33^bzyYeaj^(rr&2dQke)>9nITm@mh_>mf>5Iu%w+ay{b*1r(O z)ijcuX*7_R%zxMiBy#Unyu#Xm(Zrod5kwx(cEEZq^N5;T3+=Iq0aQB#XK|;B2;P^t zw8We=_l8ac3yV^fwr1%Sdq!~kY4*G&5F3J>>>z)&sR?{!QA4ie*b9cVqG7ITy=}n@ zN1PLJZ^xH_$2uMvWAOhh?>g}ZGEwVKB?I_G9t?9B+DA|N^$Z`nQBzYfp zGB?ypN;0TiJjJZ)yZ&?Wake8uVejb9do1q4$LQHw4=U(A(;!?{FhReL%0&B2ml+=i zlIqKDmt14dA3nVzUwOz#{>}LlI$oH#DS5iYY;9Xo1j_n31L!b7Mn-QW9JRGJRUPY% zPjYdeI`A;pUQItzVU5=+?a9^Db9a>o0wzA}dR&C`-6c=_8lFhoYKnYoN9fT< z->eC+Bm8V(p-;|FgIm$3mTFM|!uWNs-nS3vN{*$nc&^jlg+cy+~#Fs`9$Szj<_*bnA3W}->Ze2sfKaUForzuG)}MmjsU z!9(!cXh9G*3J5F9M`Rx!QC{UVn1UVj=5{Zyg-&OdnE6Z}pVMna!LC*LDt-I{TNC=6 zrtT>HUAb>8aJm$GYy%XZI!5PXXyWHSHbPDsI6rBaNKXq=HZ$-Wi>beCq#;(MX&?{$}ii5>kn_0YY}SQ zejZK`kr4!4-;eMUI%T4*#5{u)bf6!)HRL=jqs8H)o#~!#VQ@K{Tc4+m_Ad6mn4$3O zJM-dqs+V#wEt!r?lV*HCX<7}YK5dmBd8))g*RerUvb>S#mGqPzvv(KaP#sR5YqA-Ve5RD9>bgM1|^qheg~ELTcB*Y~6?=&v4l)1K%+cYJNj z(b9R4z%jjzxirA-_V4k|P+VO%sz4)d0N2*p5k!dFR9zFu!hYPB1RJAoI5UmCxR%rysSsgS1N?JFY5MuWj$Z z7T^$AcFF`<7%GlW3uUgV?8BtJ8-^b>;ah8we9@HEQyJxOV(Z#3DxBy0Zx*wG(k5L* z&7kgBexiPOSdP_2J3TSu{uWVbekf?;z%N{$mt8+e18kTA{WtUhd(`s?(xKNZgjkbNsY*B*WHwn#kK;N=F(o 
zaV;;udR(T-meWM+iHZ}6BoQ}uuLmMuJ`25oM8WVkQ)xh>(J}Y;JI9Sg?h8nb1~f>YboS@#v>WZ-S_(4i*ywjub2PPg zfChD&nB;p)>tAH7WH+p2*HpIAtF~(BsZ-6T9F96jx7IQ>2U4xNj-;ft)?zTt;#N5<9OipVzvvcZXp~2f_<%YHedtXduSGPw#5|;cl0fo<}0G z6JJIaAbyNNpBS-iYl)1@Xr_7_Y`okAKo22QBRg1jUwX0bZEM_G_?_R^~^AM-+>6Z`l< z>R!ViHnhV3>FCt|NMY)iF{z`%^>y|V&C@(6uejdw4t z1YYktk#jPelk3dz>%E|Npu%< zlYmExt2TGZpARGCf63D1I~gJd+YKyh|(3}n9B$a?3e(Ai^)7j^nT+g!|@QlLF{ zq@LG)w(g4L{<^^^nK_LlDwk@$zH7Dm@4tlp<$rq5=eW5Wx8OiwKYF_tunAv0h`i{STgoo|5rFbDD)PPB6`d9t+uLlnwTalmD3x6GY*6gm~|3eP& zzp&~5EquPJ5f5LF!ii!j5fycJ!jxuclMK+I`)KLfdP&RRNl_L1Z$jt zf9a!wZM~tq%8&(@#G7C023L4Q*Fh+Lm-wT8X;!)%KvN3jw0K7A9ynq5ZMOQLEBLUd zqb%MHk9croBy+k#vB$rxJSlh7@gAYo`{bH^7^AFdiglQQuS-k!mUkt}H{}I1E$Bda1LO#dRgVx^|{S}Z}3 z`_uvHdck9S51t=C%vB4?)+mo8e^|wELnFmWhZzR?F-W~d+KaOZ7dJFH@^3fU0eUI5UjtlKae5I+^Sw4&I~k> z^Z&{pSUZVVLBr(N;~?F$8#T$(+Pb(ij^dGNAl^$UbsQg@QPpPeR zv3Tu~`Rt2c_%@D%(}d=$M|F#-V1$z6K!S*y^js}X(!S<(+U<7$ji-hyy5viS6~zOl zcugrZ@%vP-Y`5Vgto4rNO|zT~EUUKQESO>Xfv_41%MBd)I$mwzU8uApHbMLfZEWcY z2_@^nD=0}HpmrVjcA#Bn$h~2P<_CCJ#}5Oa4<0G3D9$sbzrs*oGtBQ+`E z%&+7buB~}T%8F4i8;b+8!-0({u?`MfmBzBY*ZgZI3H@6GvNc;)lk?+4)&O2iC`)zi=hTUH2yYte7G$BX0J~)@cciH#gJxIHvltq+&lS_}Z^&s}s z76wT(2A@F~)(uri^lLdki6Lbw_^rB~N`AyI(vYa#sl9z`U_2=HfO~!oz@=j)ZE&`H zHFs&S(4n?rGro~)>u%#1F{^B*ln4&iSPY!xqG#m7mmC5A;XP4yb~T~`Z!(K!ce_<4;J7*)?ix-a`m2*0ufBUpvBS}i&;qBA;Q&VoDS+9c^+ioZXqwZ3^i22%MsPnA}tV?0S7^*i;o_|T0gIe28jB2os+qCT0G5~r5t@sNW ztrj#l*EQ6ntK%;7*<3n~$Q?8skWNc-@s7swg^4a{fWwBK-r%8c?=5vIi$|hq-NvuEV9%h^Tt_~2JwRfSn7pLZ7?d=x~ zyvnahUzqe=_n8O!!RnGAaQs#@y*bA-`~0Amk;k-y=;z(AypUoCn*Q4c@HhUB@( z^tykA$rbs^c!=pMyZ?Opj-YnMuCOYxL9MVj9s)I{1-8UgXo^>X4g8uawla#VGfE+* z;Q9|0Z(EN2Qe-IXV^oO8=z+LN1sW)dq9uA{`$ZnU=~`ep(rxjR1E;5R%AAqFq)`s< zDXPBJ<)TBZgO6o1$!`Suw#WmnG@Z)ny$TL^K5DRy*AYa6&DLZUO#CF6%5Fw7vku3c z&BytEkrOj|ammT~xMARYsHj}xZx(4)bdO2-;)LC`5u=UT?+Yq)?Z7H0gVSe zecpc`9qJakB^_N|x2M%3KRO4-a#19q<@$Hml)jXtQN5QtsB1#fr0BL#K_i+$qOv_G zabPi92ESUaWm|l2(!r)MGmt&M6wiBV#Me0E&Kob0QOz#cdHFe~yKz&I`v8)pQJ4MK z2jS8zFomhTLq-$ 
zksD{@`~&*O`A@xjRQ&1K{`U90-T>##1Qn_zktt-)obX_5473)*RQCNDUVqDv{54PV z|NoypAi%PB^#-DVM-VrVB|x>v_H!-q63=4ut!@1uWsl_K&(^GorHEPiG|shi^(psp zFnfsXT9UjmsmAocDF+2D_wwxX_NGTiI4h*R$K~2OwWx@%ieeL4ltpMmZescZ6ww%b zWMXw5iAGvPHtxUH5r8b|2n@N=gQh$%97kHYYop8G|A5)SPDs_qY8oi4JhM3D*?Cv) zN{5F7P=D>P+98cR*GuAH>ml2lAV0NE9~;Plne#pFnJxPYQI_D3`th?JWZ|H9b_IaM zu7~XJUnk-Vh(6(ffdIht!nhVPtN`xhxkyrpCZS$_1-4P9BYmY@POR&VQmN;=_m56i z&P+WVaZKLRMCktE?|EyPJeYzUv!TbCH9HuuTGsZ%@|qhOj$b%F>2j4KWmIryCLLF< z6s5lW;e9os04+y+>IbdAz1`jrmRB1u5m*(w91J#F+0$pP7!VOi1kgyiv8in-H9N4cK@t)cPOdV-oc z3(f*$fJ=I1Riz}xPgcN|VdRyLYN~MI54AdLr5F3eS~17k|HKZ9=E?i0!NCo>awaPb zc0X!tWK1QN&*^G1F7|S9@f8*pmO}-XJf#lB0ugmph6Y`-Y*8a49C2t0d)!mwbI1Nj z+&h8|I9QEmf9Zd*_ufHGe(#$wRzws;X@Y=KM1_!#(v*^@NEZ=7st}bLVu&CmKp-m8 z1q1{X1e78*gdS=lU79ojsUbmnOQ?Z__&(p=cXz(Ooqe~=y!+QWOoo|%Q*mYL4jHI34)ZSGu2p-9|BdDbO>FAe;WWBUAN?*7T=_;?-C{t5eIXurR9T)(gqc z@SqZzp!hfsM0rL+0&WJCgt|4sR^2Tk<{a{6UcrEWKBd(>!l=^t7p5@S#_Of%z`oE> zTF3PA%Qkk*hNLRM;mr4YW$Q4>+wZ1fb+rCsQVygVfslEt7HM;#He^y)H%=f)wE0(arZqP@C;CE(;xV(&}Tg|mUXJtHnPO* zKELC~4^tkNXCLtZUQt&e(is(h^VBTcq&R5sX;fM{^B67 z5_dIXF*mPN1o`X*K=Xv~qiv>0!SPx&pOA81uK8HK{;%no@ZMk6nI)b7M94fmoTlHV zoKA@u+4u(GV#MSz+H`2pH%c)=2-k(&r9^AxdSZ59QOD}VsynCk&c^DGx5?bnJmSr7 zx)fN;on0gD@5PkCTp`k)1&XCmlA0Fkpgb9)&3-lv@9x6>-O`i2Z`z9Qdc3%(>fu|I zt`As{PaNFtpKZI@_pC^X?g?^y-Fj*Am#v;m^{<9oFg0z9c!_}bg6(H}Q2&R`GxmLrGLQjy=;P zaI-7r;q8LjYRUsuH}@dVydPOV&limdxaxd5H@m4@Y+}a=#Bm5;IXyc43u6G#$1vud zK%_nHFY+8i1l;Ri)a%Pai%s@(Mx#KsjmwUXQMBDh5;p;q%bMZ%pAf)w#(99XFzw|< z+qzV7c57HCIWWqWPmAwfsYj@h*np8=j`OU~dkX;0J5fRFWQX^>g3@mN3&r~v@b=&S zlkk#BlSdM)Yr{?8Jz2h+)9sstHUQS=_=U7@N>;!5469Z_H zJ1htQYXEkL4}k&LAyB(~&27w9y(u;$0HFR&Ak4Kt92qaBj9I6w0kjf8460|DC1#cWL|$EPuoDe|J~=yEOhTjlWCde`jeF5&mMO4de+Rcp-ZZeD~uqDkx-*b8!dCZyKZgtfvR#p2I#Aadqp>7w}wGCJF|!NDPWET z@go5yo3+Z7N!G;p2c-A4`Z5#EAI4Xg4bO4pXI{T^NBekwzWP6wVfW(;WPFf9x&dN) zqZ4X-=g`XiZ}Gyv-ep;w!SXpfnN}Rc?33S?N0LsVwH_81lj04D^<~MEliOF5%42);J2>;> zj_sKzJfMq50bR}#p=;$EFMJ)qxZeb(_9x=Q7UGs1QI#!p_Pb`-(c)I~ZTgIRsX@Ek 
zO2I_s4$6liO1X4}ht*xVI##&{iZgVmgoUpsy~LPK;eP>{^JwZ$M4-?Z+K}oKTnF;a zHOd>g9WNV2eR;@T$vUEl85%nst!r3ZmiO+4&e<13EV=!BmQ^nL^|l0G18P5R^yClR zKK{iN4%JxW^h!gWH`eP#?!kDS#oyuao#$`!H3n% zt|A&+uOlB<-%mR5;K%5=qFzM4-Oc6Cr?P!iW~~A{-t|mrnl`EJL}PrZ-Ml9KDY_A3 zrq$(I_FNn&eb5Ruc1G0n^u7LKxpy92HaGO!2?{*}D}FZ1^=f3ZavuSN3>n5sTHi^m z867KY1-gISdd|9t^U2E8Q2S-K=fFnkIGen`6;C{qRqJ`ucdPSdw)<61?1of1V6Xyg%UnO^HuIB;gU@VpY5vG(kgw;q1Y35u>UM= zUs!uN&WB=uRm(Df@6x zdnLPz?i#P16`DB75%+~)TD+3b)$DfP{<`$O>D1b3-D&W#1?xJKx~s00&nKnb0v)VQ zUFQ^QMZB%t|G4SmNj^8$o`)b9(K(Gn?+wbPp(pQ`jO@SDivkEAg$}zPiXT6k<8Zgd z2W#Bm1>{}6E~s?NjlsQ3teX4Z*~Tdy%MRmk27UrAV$_@3N^kIR3 zVTG|pMgjO(vJr5+JoxwZnMc-=p{(2%p{!}DNB0S5UEAx*bN4@*mUykn{o0*(;a6WE zubQt@qwe(|ICi~9nU{41d|6-!c3<5C5)|ztQGzJp8+-{N0!TW)A;1WM9KJlvK&-TYVRR zUL5dN9(AH-d+UP!Z0Bj!WA@RI1IJZ9Q*;3F{k=;Z84>^OYlA#GKyA&WNo?COLZ^|E zqH<9djdo(h!DVWlj=!(WYtC4~n}vn<6V4uZ)pq;J6*IG`IZ#}+XI@9v;PurB;zn;S z{vG5%;3%0-w+(xkVL|1|rlkO)_|cflUQ}-q?Y*kXX@(VOIz&gcujj>cqTz0>pe>y> z--HraPCo&4!v{l!z2um-ye-9L3wxf)o%&dNudhun9*6S!qd;Bm+1;gaiW>6tsLl-! 
z#lq?K%#B^Ea`v3iO!oG*H}8k9jTyIGBr-FH=JDMBs-zu}Wq|0RZGI9V0BxNMrA#`b z`@%hI%rL`MkTm+wSlg_jhLQX+JWsDd2T%Jyw-rxmd-=((_|P_*?T*uU!wOkib1p#L z5Nvvn*3$M%!Gd|uC)!FtQr#{McP{2{F~o=cJ6Z^4n0#H*3|3a8}Uf@Q8L40XH}p$1A~ z8y>I)(56$vQJ5zgWCY|gf##i{k}ueK>t`a_;*Ptp3PFe5o(5LRFYi-q6Gr~%_?#Si zJ$GZXf(^?vIN6z0)Yi9YkrvnkD^Cn&-VW?ZR&6#kd65NIOH2b7S(sL)buM4O*&o8Z z<9;=YXW6h^DtA09?`UzI7}Jj>OK%Ip+@h)*3j`!JO$wLAm=Fh*H5E4QmZ_8qTCQG5 zp1sshm34TG=kl_D$tCTyD9d%-(2KOY=ew+Z!(T72$(gY0x9jGAM1X1soM2u@u*B%l zR~oidIjmsk(ex=pzI;Rp%!RP6Iwbh!G~sB!%V`SrnOIS5X(bO=(UQ&nXVwKukC-LH zb;DxatKL|vnl4j87;!>fYaGe2`Z|6pcho6nM*7A?J)rn)7!l+`6HpdHYg@mD_=H@pzR1dk*h&rO1=OYA>QwT%MT zR8Jji2YWAC+ZcZUJwW6wdzu3RbN-na6PnXUDwt={+wVtc;aK)W!b;(5J-4S%5*(#i z(IFKJv<(@k5kymdVCLL4b z+9PEenUg1cT}&?j-(Uc_?+_Ubb_gTJbYy`UK1k$ONaQq9)aEBDCs9?^MBf3V@#PHX zasJ)4T91wA09G z+RGKW8vW1hBPvgd4!nOq`SQx=(c_vItJ*|Q=Gv$Sy2_wg*4U=8je%jK1_WU% z`ws^wKv~lxf}V2X{GU5r`(7e_P#rn^Y(6DsWawsAX?%i` z?_QunC*6Fj7fK; z6Mv1^U4^G{+;0*`1QK1=4?O!U%{K?AhZaE~6YO6irt?Ie@)$}ROL8==CXKtkVjW|G zx3;nSHCD0seYpVXq1~w==y9O>RAD!N*@0(r`|p9WX2+1hnW`k|(U^E%@~1iw$!+E|7w=&DJ+gw7FRpvGaMa zORdS_W!)-W7xp{t3UIIOmaAdO>otA&r2wSA;^;g+=?{ksjBy1(>SJDcpmmq&OT>$z z&BSZz4TNh8dn#Yf4_vJDj$R2fbiP*SR%O>tC`xn+V5;~Ql8Jtyw?&w&d>UmeBafTJ_aaGuyzw_ zmyjRs(URl|fC@fI!6I-J<{4nG?aSLr8f&9Km$z_Z@X5<`R~z!9Unecviw9P>bSg__ z53XWMTfnhy&)t=3rLIF&kJhB0VE5S_r{^@QV~;P0>=)(WY#DwM0%L8iG4iS+kNQ7k z)aWx)JkV+X>2Z*8&oHeP>y^4M@z||nt&?5~^M`{Uj<2}jPgVolc`Qa4;N}M_ z8#4@N5{KK=yk-kN%Zn&{QEtVd;F2xRp9Y`5R#u?KUPFCok}(UGh7Dm+K(C>i1i%zJ z!B}899_b0yaJ*5`JK5T}xV>Ta(U3DjPVcFIV(jsMJl6?2W^3FL;>pDA-N~V?8pQC; zCDalEnAqZN+GykD8_Y-gKu#E8(Wjp=KA*-%uV@P`x53?zhEWEkjZKgOh`s{F zXGCvaI(CB!0TPgg2N=;TbS?F03>|FOuc7KrA&F0-ZH)bG)#xjf3S$}M3^Daitj$?* zT@&eUemAof2{{wd4wHyUlLvtl0kIL+I3iyr0*+1> zz=&#C6GA~mK}jR8!o^XqmDKmNFidB-Lma|0dcbxx7mVonD3ii_0@YA8bTKowDaWmQJHx^?|-Pu@%FhrC|B zl+BS9)=)_aI>?n@#;aJ8QCIMdT-nnE2)4@)dPa#hlUxG?MGGQRb_R+l{dc_!PtRoj zlnzH;T>n5}RokF$w4StgA=6CKgZ$P)ScU~n)CH+xT3VEB7=Cg|hg1Pq`o2Nj8=3G45G|d>uVu5JL$crJZG~~w 
ziRJ-Xy$_ug1&217Rh!u3A3p16S5>F!QJ4=TzrU6t^o%;FKO8&_e_Eh2y5h7iir6sn zi(o`0v9g>T;hW@VJ zwgIWgk4Ne*E6l&a&n7xNJ#&b^x?*OX)NTb^L#&CFlRD>$(u0^W1()!g`=6jF)JcpB zEi_1{_Arr;o)f7YW!wa%#eA2p{bzCX@%zcK@IDpf!{7w2P)scN7a)A z;{u~P)K>C`@9URORFWwEto;%AZ>Gdu6WAqS-1;#xQ>Yk;)1Fw-#pf=Q{ItQ4@Z9+_ ztuKq$79ZU5ciHGs9={YLZK8uIwxxzYoje>WZIOqu+^=Etvjoa}XNEPb_|s`|mRQ$? z61a+xb=Qu$CFja(x$D{PsMkEYL z+GX00s0?&sr#X?H7_J(&2`ed#zm*H8Yv@ZaUCRsO0BqYA_+~WMU1P;q-y)SWm(fUuVc zN2mxRsk_Mock8PhDpKy1SE&m1oJx3Xtmbn|>Gh@J&(7zQw@sKfEOiy=DK{)dIvB|o zyh*T=jm0@X9C(5-@s4|Ky7RqPY_WIkw&|}(sI@oHQ@Dz}WsD;ViTVsj71?&@TTZ}Dh@Wy!25{SI3s+Te#z0Qf z*GZ~U%B8g%SQ&p(!+ztE;|ya2-yN_$zI{b5m?6Q`Wf*Qp4xozaF8ky^($3LoTjghW@Y_^1ws<2hhV|e+<)a-mhNZcOd6}Erg$SvsXhm zZxmgs&TY(PS(H~G=G3^P6W8A!#9zCKsH_}$Cp>oUYJRdTi~kHorJmN$Z0SSC zZIiW%t3!Ia>U+IJUyPzjTCr#E=ZGu2-lU6q%9xEoqA2JJr;NuG4HP_ zFl8sdU&YcaLSw)AkzUv7Q-%jvTGL?~GI_csHyNjTZ^^yZpg5dc6vU z5+4i0!OtdjXAP6R+@>Z{>+tJFIXfP;+a(K|7V+pa|AIXL&6(Q_Q*F9~g`g48I75LHqB>Y5)QeP2E+{%b=CtJ8}#hR?@un+zz9$~cqer)$5 zn+b|3n8!!d=`tT-0&4e*m2&zpVsMEjKxwi<(@R6hAMu-!X;)rZUqV0(#*Ube{*FvU z%l|&)altD2veUt_Mb!U#{>q{D-!f(Xf9KY3@Q&G2W=rTVwlx`FC@5ZFIk_MvC25tJ zUl)dmuG-s?;m9n#0sjQ;&VSB6<>-;N$mz2h{F;AjAhFwKO)2x?K%PkKgc5st3JS;w z(2U4u7(xx(d%TTM9ymm#H&C9zGu;3^RgSxXD@1x0%$7bu;N)F9iWcPbPq5ABVx`TG z&&VkPd-3Z4bkH>P8~HQ?Wy3&B#~eYgV2Fy5+*yP>}R7?UiY%Z z_LJGy;SjF@d(BP-X12YW)HB|9yMo?xt?#(^&k zY%`fBPzW7E+20P&U>LU$WlD!yQwzHaV*%UHZj^MtR!+X)gLwToNAN)3#$(_jJ<_OwC5m3 zeL{|gdQl3x9}GRo6-M3>Jwa1%&QloxM31Y+4}6!9vkYv3ovkyXy=bvJ+qct8}?!=0=FuqSj4*v6ff!ZGq z_y}vD#(QyK&~Cv~hkv1!-~JXx&NyD*dS4+SwQzJI{TR!8KLH{aIPOC}qhXwnR*(qZ zNltI}+-MDhp%5uakE-uMzfLSe1TK|OES;gGS09-ClvxNeIO}sQ3{}p|E07gfB!)Zzm z)rs#VRMaKu1SpN(MXqv-qRMa2c_co6%N;d+{sYMc_7cX&z7p6c&c|qSrwvn&v{VcS zJJVWm6fh6Q05})(t#Ta&p|>?wLLL#5_{;2vZ$;MTGu~(YUTwV6@x7ZK9VHvCaUp-9@0v7W`-P1ZbmGCh z*_oAY-_SQK(dn2G!nPvKIFzNF0~c4I8^2^hv*AfusvFuu=BZQ6LgW>I zet(xF)TA$C9rq`HaY{#UOJW`y|UoEoM1& z+KKVRFFC1E-S7_w4z*D@$9H;ZP#*Abd(Aq+j02WvM z?SU5YbHv26vJs%|%4A&D&gnC9S5k|cmo08Qdwfuja})!fN)>J3BP&E^!IN(<`jjqO 
zL^`@AQdbvEKRU-h7M*#~d86~{qLHf^PRhtDMwWa;6jN0HZ09(UwWwO2Y8;n4hE-Rd z@yZxyv~Abzzpt~(3mk>gO3T-dGuGOOBEuT8C`xBNv{B+z4lOY|y9f0`qf-i5R~|h( zsaD9x1JX!OE`4e72sVPlfSp)4uM%-DQP>;}sh;1Xt&l>4HMKN1uwSvQb>II{3y zsSQy!7tmT2=BL}xGeV&D08Xf!QO};Sl&P?juA{cm1<0@MUsLKI8;dmtd`YkqKzO{( z_j&m(pQtjp#uNArD_D19YArZTcBt+`(IQ~txJ42^>C+_6PoHLBO%MSvQ`LVatjJm~ z4py0nI9+y%@z7J{=zO&Ynup{zt&0 zt9i;pUvtSCNUtqo49D8_D~Erq1w7nD7@nMDBcat+0iC~B@&N_YgDj){Xk`))22AVj zX#&52o~gS)=b2DOtSH(HK%_<=!RA$mlene6=7CnaBK=X%+xh$RBMSu{XF_i*{bq2| z)tkr_&`9OAQ;g$nRJ%yJEX6i7WNW0b!Un+&U6H$vzq`Gzt0+%28o0+NuJkyi{qbif zVrNE1dx2)frp4P#hgxfxy4++#*BW+-`W7FshGrG5RIdX<>}NVD#?3Kr_Szx}l?%Ya zKxXD?K6Ia9+(ZNqKijIe*GoA3nsRz}RG-wly~};tP$RllboB)2Ibc+fwETk3Nky#) z&{sR}_H(i%$znutYjhyFITCwN`?H;O3z}%#b)|QX*-WSHMH0ZPlg3;N$|8k^YL9%?p}#% zteSTFR$0V*qZ2IVzQAwRrgsUHKswec@!sOc?{(&CfGr?tX8`+@-qF@f2C%}FLsW8z zgc$oY+^?-^?G!piRyt94>*d3B|K4d0ogU4i?uJIA{m zN>9>b2vLlzv4_TBXi_6q988pY#PDvjDL?v$!#J5bA5%7NupqcnR&d~QRG8^I-1^XP zB=Ho+t+#?a8rY=6xCO_Jvb>2QVp0#%o4xt89>=*<4n64h6@`94Qr#~ee)8KRCakfv zHeg2ExGbj3Gre&bREImZx0u&PuPB1}tUN@Or>@O)-w5Q%B5{v1k{V!T=v@SLhJID zpy4&RsYqE{_W;E80Xz!4;`mC);VldUzdRv6#nE)?9uKs!XGEPkx4beUXxL@giiF10DL$dt{NXqUn6lEv zg0mC92rPr0LAX!w^PUauE}z5}-&&q2Kd4AayS|@7RJOx__A?>E3qZSkVNNMyZd%ja zpZAO*enh4+$=*k-PG@~lZ1(9xzGUSn+xGis?Z1KcxU`M0^l82qQ1^-WO~(JS{#wH- ze~~bb$E#}o;izB35C>SiJw1^UoKNWzEFgJhz$p`kMniR==}wSrGaE_>p*SSAvu<40 zvvz>hZdVgq_)d6ee+uH{6oR_oe+5x}n%B;;e-h92Z{9HyntoUrSuh_c#r_pBIm(Sw z6BZ~`kg{MtIBR##H}=c@=zCK-ax3Y_G4iywms{P=roF5ipXW!D!d4-}1-8Ey=r%q1 zM!<2y+4FusiFr>Sx9GQSRS#2UjVEJ6^fCxb!5eE7D9&xY z$kue8Iz%oyXB*8(@hMsq5vvSWNvDD%@kMs7#Q~+4e@C=Zk+&UQ-lm67$4*W}X1u+q zFHUA2M5|M=uLI99UJp=}hr?vb)jA2mIE;So=%V$0$j2ESLAj-ZM@>pZm-ivOQimN* zO7XSTnLO-96h+w#*euKuiKwb-Pdfq&v}?0!8@oGCF5zu47IPt;(cl95M}UaI1rb5_ zH2Mt&{|Kf+&sZ&~StEo%HC*Li+%_KB*eEKkT>IXacRH&0P%^H7-E4Q65evvcXq6{T z%{7aOQf)BT2Q|Ric8o}Bd75;2ZB{VFjIQBjCFTv7UfD+h5>qH8Gu z_h`n)Y(xuUbGXqFM<5-5FWEmog$a{`HcZUONJq0{Sd#QlX;g7Y$f$5JUm3*di`X zqCCsOfvBJj9;=8UOc|fFNx-|MAW;*u?=>|Cp_&u76v? 
z4yotiu1YOpvaK<5Tb_|Z6V+v?P8&hBOBqqqs1uAOTYCCiPuvx@C~rk~;n;YK>aUp^ z2OXOiO0F8MT9SYeZ_5kf`TZE8B07F5rn&z1QiF;+qiARPYdT%-ZZC4H)`IKh7H2!HYq)(&cT z)DYnbxeR!D({sM1m#em}opm7}!BV6MjJ(hD=H`6*R3nv18@b$PPSIx7z|SL_RHsnOWpqTZBl^Tru{qQ6V+p*A<70hd`c7xGOvN6bT=`(x-LH z-jAPKL0!H3co&%lna_BG3$!C8WW@oonbq#M0A8}J1;j98*Vh3(4ppWE%W^-OrAsqz zwsw9~NW3sUZ_OFz3FCv0+LfT5Duq|r8i2ILWvefhjx~1lrd9=fzLps@=lo{bdj z*j~T)s5@W{_v_wzZQ{zrF401Czb8AEb^#FcyTVdnl#}>2Q3so-aEvlNwAt=rP*Y_7 z$e3{f+&cTMcjNk`zVxZA@04_hZqZYZPG`MkLYG0b+BUWzV}iccl6Hd@mGkBdl{0Kv z8FX=y{NOdi5G9y$_(*D|hThp@&pVDzjTsh+5#MKgO|N=yp_j{wd(GT$*&)fD*~#wO zBY;ypuuOjz1k1%>P7SxFp8#}$vjOumT3TmwvQHbxKplC;GNoBig=XEnr(E$qNhnr* zm*gDP{6_xC%+n%`;%wWhKOE8IGW<4pWB3 zFC>A8yBb%ZVeIZ=2mme;e6K!huk>#e1?3?|ZLpH_Kn;{o~EV;tV&=ztD z+7A%fs*CmxNKtg<9>it=f>~Ix26jSeXT||LO<;DkKmb7J*T%v zgU@jQxv~>We*c{`oioTQuMWk8vToN8<21y)C=2K5ye%UYW5_288VOZp16{)#zI+{$ zC!}MMBX`}SA?y1S+LszGRqYrBtQ!B}Kq1O(a&Qan)t9~D`yFzBIG)+TSM2A2h&J5x zha;TCvxyh7M)xv`1{$HrmuPhZx*Hx|hz>HW20T<=*&41zchC z?KVHvRITqU2?d~h(4f^c!kwlGeTBH$-E0}8ntu;UYGb-ZqS%f^AEpRJxP4~8rlCiG zuWP_iFb=&zea4n247H?XAN{=NQD$em>s_i@47)m{vOjwlJz<-(Vhdm|q?O<);{%Kc zSg@Tmt*C%D)?xw}+dk3oIhvY>7+Q07N8Tv^sllv?Gt=Yazmp#%Va zsY@Lo{peV7tE@#v>IUMJSOeyS&P?);b}7o-{@bm{R8dM%9wj{_?FtRY!U~u72&2<@ zPcwd^gUtL5{mp=0>2#x^Ydw7F3Po6vL3!sn5Y^&~}>EL|Gd2L1Nid{goY~0TvwG!i_U*y5BXV zII8RKm|ef5bZ_CRxJgBnJrCm<2TxB>;4#*7`q3yRh+%5MxRrzUxyO*8sskQsedv%@ zs@w;V=UAk6ZCN<1Yh7ACJE5^F`{%J=N4=Qq5!os^KX&RISmUDbg~c_5oZkh5%Pnmk zCUyn}-H5#j@?aV&A0td6?iX!B!w_CBO5Nawy_iskO7*&(g(TQUNuynHrVbU`uWMgrh7?O%e^-H*w;Go-kB0qNEcx0p>B zIvQLzu_&4Pl<5=^`w5U@Yp*kYF?CV}cD5;F44v%KMHt%CYL`jPR1~x{`Q4;na{SA? 
zeBZa(jXJ`ToBxMh*3JPAh>l*_uImjVHy>zL$>(U~LWo<&Y(cR#);e(XA=aWu#H+{So)AQI)>T1D0=3gx3A7Zqp#^HEeIbHx> zw&3G2qL$t4ZV)Wo+Ew`FrR?Fnv&#t8z>v+X%zxHnCmTP)c*M7e)Bv z?3&}PY)89uxx=zL@AyEIX0>N|FL4RpT1*nxWZ9F;vPN} zrGTqQy-k%+@2ISd-czXvKVvWGn`1wBgqvB~^F?;q`RfwoEZwoPewtSV9sh_PP@jrx z3j<4+yk_^S->^ey2ZL}=D_|Gl((eUdMJmQ#@I0RJgT%Z{%)wtQ11h?7Kwm?738!{a z)#hh;&vFAe{IuAlS<=-`XX(pV*4}Y}GG7%leh3__Ca2N*;Jd!$J3C}jJ?=Z4NTTiS z`ViP-*;S+va)m@RQ4M28D@w?)LfbUe-1|Zi#hyX%2AtS?yImGJSDDr!7WYk9HR-(m zhm+Zdgvs75vk)%!(pncchHEt^mS^P@>DFHlG!fl6gVo|dbCaCI3=oihYf$eP%uTWiZ1_(625B&}Xg85HQBle7ZGBk>X5(@rYowhpMhyum-KO23})e#K}o_jAgk|^b2|0wu4 z-@{wR_v}z>usP!VYp~RT@yjK#(BaE>AJH_lu;;8EE!J+GGNi zcX^-Fasc42HH-5PdOF)ee47=GP}^0HC!e02O}xJ-sB-vV2jcb+8=%L(uVMsc8s;uq z3k7@HJ{1!mTDwyd*Jkol{MdBjW)p<@7 zEIuS1&;H^U-;RG=gaQ;onz(sir*msSDPmV_I-^vZyiY9*_1o5zuj`5gKbU!FR>^t& zzGgvI(c3#e7dsG6ztmwjGa}?J{O=hc-S_1`u#WvE5n>M1+R1I2JZ&wo zuMeJU;tb#E>Yr=Qv8&3w7nuQ{PB#%J92)eUZF`YVtMAs{rB%M7*=#4j-e^ka1^9f5 zaFKEO+5QPE<;l2?Wb$PZ8{LmGo|bbTM28JCiMLCN6%)2doPnM0E=0xtt)5fpvDQhA z8X7H$$)o9Kb3r_!zqLf{6~s3i^CV5Rx@wt&#H)`eCtjVOsA%tlrv6ghMp;s8mM#53rN10(n5o|BqjMA z+z;z+{78+~=DF8hYj#A&7)~?Yc48oN*uVT%>nXgUL{1ux(1cL#rKD^3BA-cD{VbyD zuNo?u9B19i9@oM%GA;V+uEn)ca6%7~Ck-V|WJTP^ zmFPY2X?AG-O0|0H9+zu&n)eBfMDYr3jFNp&QIf`9UL37hEqkH)J44^jKX20MEC)w; zmE?)%VL{Sw)4~lDGCv6<)jN*vT^d73|7N9_E6xOV-@dov@s(NOJ+rt!I3JxDaj7u! ze?~pj;AJ0IrLAZ6+`f7jTYgx>d8N9-EvoEqOn$C=$#kp$1EtlOT; z7uexF1&>?uJGt^nb=dFG@NN#OoU#wRn5C0+Lv_RQIA?0D9=(VVYJbv zEoiMdpm9NwpGPNsL00At4{miNnsd;{MUu%4P-jMr4;8u+fMgGUZOXwq)(?69!c;rP z_vpDdug$JF_sE^ImjFxY7wJ}#k~T?{H$`9Pe&cq4PLAsJ?24_PV>`go1gs^w zKSHE!<^?xWAFghNA6Z)PE?V$8@@hQY`5RjTm;zgL3rZ@03B?>? 
zsr=jukh@o33lXSg+E$VrL5pKaiA&3sWy&4@JZC>W6CRtWE6)r}{RwsI;X#P=(!YkH zz2qW!#axZ^$xr8-K;_Ht^2<-_2uxpk9MxB9^~l-h?h~89?;>71p>MF@j|1 zsmEpysOaH)jlE8-+38L}54jLQG*Uxkfbmou##IGiUMuyK{HhbV6PcsG&M~ufmb%{b zyf$NU`s~;@d^w4dR;LVrKtv{0Xar#Wzlj5kd__dtX9E|n`>Z;fwI0TY4W{Q=+S^4w zdIT#mgGiLo3Il}zch*Ag0(Tt) zsu|y|w5`^?uPuU0PsHM=c~x4DfW4SWJWB|ier44&y<~F*yj(k_6c0;p5fZFXuFr1G zQ@a~C(Q|zFLDHEk9q07k=5(7}G+RATW*2J-I|(_rfaTKQ`pWRpj&|9cPyj1{)tuAd zTrz3;(jHd7eVm+}q|2_#;@pM?9lKQ(mK%@v8Sb%^fQ3;e|6eLw|D)e>(3WJc))I=X zL4V(L^x|T_@+agb2Lml_ZE89d-{IVH9BX6o6hO>*r9zM{KY1{&)232I2u0Vk z9=khvIz%+LGdNak>sVuRkwFXRNZ7}&eo3Vt#K`Qx_THTBnbneMyf`Did5;+BPGO?> zCdREsU9H7F{ZgpJ+E>0l=CE;4e|FdCg3I)HS4l=;n$mt^Pct_&Oc@p?4ymCPeK}|1 z3K8~wW^?antd!n}^t;iNZ*k9#-if9NbV78*noyTFeq2l2sa>hvEs#TQSN3*%x^QeA z$gni|1DUmJeTbq-sJj#k4Ak;N(fGC{kEKl4Yc9l7!}bc81jF0|CDl^uw+!pF(pM)5 zpNpU^01f3d6zF*nWNhotA?3^&25l(E%kA3@4A23mba{G!X36f9) zg`2!wlMI!P(a}46ZT4QyMJ=@^kd&-Onm>GBtb}@yVpVDU~ zX6YB9EOo;t3Sig_k*54S6y|!!C4@KSF^4fu3}5$b%r)%m%49;2Y(?S$c+Sl<_1ZD` zrPz>E`93#7^o=a@YKM=(KVO~7?~W;x>wxgHJKQlh08PQXMdEcjA=-)7l43dUz7%R4 zEq3ATVYj~Q1YEJNh%y%ubv41~h+O@SU%k(8ZjKzTg5^M78qf-PBO@#W+GL%!BL#lZ z<~P6C@L#)!{*6WXzkAJgE(rSDx)Lzw!_Mnesr`o&pt4>fU>z@TC-~oha&hdmZ>ol| z$C0$0OS=%4!ix#K-|Nqs1u;b0nRn7Rd55qx3wOY5#iGE(ps9KW>Jra>wb#Qwz6xbT zK>B3&4{fEh1jey+fyJLDkN^Jt@A&+kAAi@!-}vx1KK|Vw{_ciyJxE_<4l!TU zv>&fji@g0q!#BzM#f0`bi^TS7ZsTX`I)`K9e#LT2oP<6DhXQGDot1cG=3;UN%=Z6b z@4cg%{@!&#tbhn8NEcA5h=58{O0dyHM3mm5(g_fy1__CPfOG)?1p$>NQbG?s(gg&h zOAtarFM)&-0x8btJ8Ra={LY=T&Y5-Y%$j@0KjK=VN%m*6-*>;|dEUp?F!-^*HtU1o z(%noLWiz`g_={u=MVc$lVRGKSQqmP72T~?uX7z!9)GJuNncIwHk820OPi-3G;hfCE zFV^;*UmaHYS3EJ!{Jw@4s&wPuXFH=)!#j@N*ku1SmlNfn%_tXXJ5WQHohlQ%nespVIX!SjBkPIqX23F<(g4c!^LiMZ8O$-6 zOH&;Il-$lJ_eroh|JLz8?*K^tF!JaE1d}M{X|irY`Gl}|!a{=R3A|^SGGa?t>dR3V z?d7|==l91D8yau8L+Kn;bMlc65G(cAY~pFUS_8=()5hkhv}^1z_hY~_}gaGBlz{^k!s8uo1Nu%MjXs4Ag^5!<)8|+d5m)G-%9hCP)|sP~y@MMo2Qx@q;*2q z=ju+M8?ew!Gkt0BMT?sz3ow1!7*D(Q@Ze5+$*u#-uo`WNk%we~&g;7j;YCWYvS&|W 
zyJFGvS8qQ=)uC&#p+0;qru`3n%<>;{tZ(c3kKOGXbZb_BSUI{9=SY-frH?}k$YU=G z_H3>-E{8aig6E)7AZ-|!25p;ApE%IGx-U@Wo^J6BEU8y{sdiJ#P4T?5e@6v>+=rKw zN4YBNQQ=}DIjuRYO>()@wx#(4skzpZ{fATKIl2!^o|8}gb}oVI8lk~v6(!h}m&@vx zgIn|&dYga$YDN{r8_4}E^nfhz!J+>^ls|&<$DLBloP{th&Y6tN-iTRI$!Kn4m&%$mw}=ZX zQg)qhEhWuSQmHAyL8K8ovXsN=#5Rnu_3Y1e&+5Kcqi`jaT4nGgqtdm(sfqJgV@cSB zkCW#gaFkgEg1JDaJsMCQ+_4Rt(d)wlU_pD`&nv;s*5Wi_?5Nar?ET>hUYt?b&BLZ| z*ih=!+oYWCnG3PA6R+s@v$Xib6SHwc)Ao@55a;KP1b5=jTD->ZUwIF!mlM#O%8UZR zyPjqb_Ai~VfZ*18nGY9zcAh6nR83@1YKwS>8of4=vxbUbvxWY^s(AD?aRsDF9m0dL zMmI?XHH5^-nM=b{hK&>czp9Z(aPENt0Ts;E-};v_ntR{7WR-t?dM=OmL)dY5Wg|_g+YVD2Y)G$6zem0rRiAmX2pu^QqjQChlEB)we)w_0W^*yPf~14FBKjHi)Uy zJmD}hYMn+(Te2rXUjejW?GIP&a5JkJiIbkybq_xF#2+!%8UWA7-#X8>u$aguK1Mez z`t8;t*3wixQdSdOY#SsK*)~ZqMA@6AMhjaSv|l-NGtm5KT)B|yH|8%4_bxIfr%E7> zuKf`kg0UK_-ab}}$w&%9)!$dYZZ69GP`;#=%vG<{LF>P0!X4xw5@i&&inFJ|y(pRJ zH0QLDb?Quujg-kEZ%q?1_pOSme=j<4VqxzExGZD&Rpycp*S+>k0q4!>PDw9{8y>f8O3iqb0|E#qCc`E5~JXmVD$GFq!{F<3tutvD+p z{XA}-u20pT%fX&*IS;Dm29d=g_BW=Rf0ROeRV%xNdsp~f zu8QNF&CX0KH+@;j`yh$dUms~fH~Kc&j1{`<)XiUVgD-e$icOFLwPQupg>c+LygNkfoqiZ(}Vm#Dl?r?0Mt$!OjN*)9XaVP5uV5&#+ zt@TU`>Ju+5=9Q7sCOn{`(01F`({nyf1{T3PSl{O)gz{mzG7n>B(PSN9freC&RxnKi z*|v}f5P~=W0xXw_>jE|44-1X=(W_6XNP=S!&(?9R?;PO?&2nn<`%ICV#q^EUoi&db zO}bGsr5<(3(Kv5otl55vn^6@ESLpErH84If<)-1!sjRa&L7Y2}n|k(zp;CXt>Lujm z$)+Lsx12AnPLd3c#__P;CCY~}-ZhB=ck@5Cr=ACEAfuXYyL;mx0WHb&_6B{wSHBI8 zro2l^)=}+R(Q58au_r=pW`^y)_O34#B6Obt&AY4r8I1)tR9Qso+mTZS(L&!vNy^~s z$W|?khbWX%MlvpZy~`bqVTIdxt3ZsVZ@15!9?lDYKXt|1sVd>t4E#=a>NuCPqln+d2N`%5ihIXQ&bWYc7a!@x3eCU{_5Xcis&dK8C zt5#oLcJK@-Dqgy`auVG9=0ctMRYUpRRCF+Z5h=Grc!|hLTJ+Mh&4RRp4h=yS2T*Xz zn*SHb!;*lcyX3Ztk54@J@?-8KEzWx#5xH$NW11d1{~WkHG$C*jHX>wL<2J?S1D4b(hE~j1F>322&yslR`gcLKe_%6#?f<1 z@SN>Ec5lw7uszBwnLnLo59|AeRZPh>2DHIp+-Bo>qh93b(0F@a)|obB8+zzZ-%=t7*uL`gXuTlZrddpr~0Sadtu-S3sLV_WEO3KpJ#|dTIsG&gP`f z&|1Z$6T3mDGdoWkDuLvNpN58N1cc@`i7C7T#d~TwUPoJfx_vy|S7UIYSL})QKzE|FBkN!~bRu=- z>Gd)&tt2(-?)x|zXiI@D`G(`)YSp&H$%fLCL61Za9dq5G`;9~N@2 
z4*p05fUgFs;9QCTv2lUL%hU=(3I)`F218u`u&@ds={||n8|%oWmMVba9E)LIzae7Q zGi&rk?D}b^1l^0v?H9k1Zi;p~A^(F<=Crqt#D1g(lW^@Ab}dN=&_p`nrlAz+Cn5^; z+DH5{1xrd5_UZ#B&v<;PcX7FPHchsu^eZr};9sBO-+ql(?MAy-{8xDa<|$3_{ml|! zpgds{pwyTGpfAxx`W{g8e;)XUrCaX5(a#0wY~z5bH?5sGfCHNQZ@&D&@iD&G zf3pB!(%8Q~$=?P2-J!o{=-+oTf3564a>IXj=dF1>HmQ}YhTc$ktAQiKvElvi$WV94u`YazQ`dkmC zmLgYyTF>_#lFTH@|e(Nj2H7qB29lC_Rg{~ch$6+d zjnA9}T>_*XPwI*_5@aFaY_-+d=1&fH9jEvmt9u4pq3psEiAgjTDl6sJ$WDFJos5eA>AlV#9V4Y~Tg{w%hyi4ce)}L6_id>I z$)#nRN3Al@-zrEOxFuDSA~dOTkKZ8YEYbf_!gCAeNyJYkH{;_(M|YbrhEhigX!OOG zIx={Ikrk`__-a>~tvPByA0A2m<4$#I)9#j+w#0f7#XnX zJhJ2)9u_!#SAOB5HxkjexgVlEn~42KvFkKb0XFyztA?Tso}p_4(+H*bb)2$w%0tT3 z(r|kTVb|3^S1_j3u7rno207tK7UGi|Gb3%4n2s!M+b(Q%hD%TFXf1=IEd7Zc6qMH82N|z)wjA4KkC0v z*YakMBWH{;66u2g;T|QBedZd&8#a`5hBNyUXB6nOdWr!mZqCzdRMmOuwVQWFBe-ORgs_#i5P?Vy6&Du2`L%9fz#zM z!l$0D4^q3n56~$&N(y^WL;0z)p53z#i!L;OTtI@1W~QX8YH-DtBSTu^zz6#?GMj?Q z4&uk$b@<_${$>k_)QJR?rGzZiYz)SwBc3SC8x^W|DuQ@#h-H4XAIg^%E+PDpNZyq0C&>sd@ z?-2({ITTg&$P~dOUS*TZ`0LWT1;T71ed6&m8%;MJ!}o_~%Z+pEOE)LiD48T~BAB$V z^@z0DsR54JQG3%i!S5Y-^Bl{&h4`1rvaPI+Uu)T}En+~-<8X6ot2=NQxETd-M?Ck- zadAQaMsD81h|l9n@aK?EMjuxwr1LQYY_5@dr&~5vv)H)0laewh&|kKA*Dg(^sVPEK z2T}+wIl?>zvm~%al?-p*uOgcl)TU#btMfQ==S!UJXOlvuugfKOpijK2U+IDp^)qX# z7Y%X2rHZA+7RDYIne%z+lknzEl>7@X*T4zg((I^02V|=?#s|-?Q1&G?ThcHPy{~FS(03Kkl`MUG#{_5|K zOd~W|(54s6#kWUh3RKUy7n!Y@3X)2kg`R=6TU3xXL)SsAbA8hn$5&*AcnmszC72$`t31k5sVChP(6C*7 zkl3ybZN;6?aY1o`Z6JM6K%J(2JP}g7Cg6PKaoWnlpjMUFxp$N_*Vdlvt*A&Ts%#Nl z4sfE!>EH>t5&xhF-HgO6ox|`bD^AcHKA!9o+R&=IXBhsos2CVi3QA?|)QQ5BquLch z(0~$}03fZ+$a{av1LHXSkboWb?mwFx`c?<_Wv4Vqn)RZIp+g+l+ugTt_48)mG}!*0 zy#ZRN^ohr?{l4S(iQMFx&DspN%^l0uL8`pc`X2cY3t>7jv`LaI(Pu?g{IaF%=nd$$ zKEb%Px}N%Nc%og7g?H=vG9-^t2%ts)g9WCrJKEps1xAl2R1=Rp9Ch3--JuefFfFip zG3X>QH`!FoJaX$3;?dyo=EpEsV}1(@+$2TEbwo&bK~M=9Rf+Jv923=U z3JV*wuMS>_F@QRO?w(^Dth;gwo;2)Hr6@&c$>vje+XHOWq6*v()RGF?YAFF~YAaAL zh%l+>tlQzw3Ex=d9yMI(JCmP`C-%u^k&2O&BWiT0&cGD4wrM|9<0P@atT+Cy!r5 
zi{9PqgaaZTtPo_66crfs^dtnh)$X*~fJtV6R(1C`a2Sqsr)Hu_eHM3O1jZGe92bGRtZQp41yo$b5TJtWn>!(_r&x0>3!zguhbp&lq#LaIs@2kZpV{g2C z02KhHI=HSTyzxVr5eZmiosm%?v#C1L+?2wDfAR(K$*8(ADuPXb-f=xRETT^ z6eygvA9aVfC()fr2dl@yQbr2-^J*ehaf}K){}d9ttwx)e2LP`Sa$*+*IE?Fvfrfi~ z*q0-!gQvk(GgVF@pT~!TbKQ@KEvtp)GT+JWL5SkmL?(MXO&g}kWQ!_-*tmwl&qIq= zKu6FMo?7O7Z*QA7HeZ&iXtWPqyq0FcQC@M+@FfciSKMuiV$)1TzEsonT>lFCJ|p;v z{$6%(4)#mtmPNW!@dWsO)C#;sYUCq{JT}AZQAtJ9tef3OZC?j$Zo_{NC;zwSY7Pec zc{}6nx>gW?<(UH>Y=2Y>C)=Meyk*2W*@gI3p6P`l24#kNMeKz{N-o>_0v~(%kdZ4@4x(7Nmok`NV)XWuHzszQ$Dxo zV(-nPrvT3_4|p;O#8Ar###_v6G*gs}?ht)}YexuCSDoMu}mJfr!zb-y|cUY50 zzc+PHJdeUFrF9C1komhEIHS*z`& zy0wu>X0;irT(jy?ZO1ZMLr zJW`@mICwBgejU}PfB+{ZJ@kNSn0t7!k9tov>S5tP0CXTv;-;UG1!mDFPGXk`<2iE> zBXE>jN^=ziTaz`A;w2SxD>ad5+A$38e*8;B@kCttIUgJV@uTyNQ2ZHRn9|*||LDLN zRgO0qUhO6Qs(l~gHgQp- z4*7CYQa^SFys`C23!5a|JW~=N#PY0n_%dg6k7qUl3hb&Yb2OWW3JB=ZHK_9BT#7X* zrwtREcH)ws3EztFr2uvH zN750|zq_DmuRL1%Yin(mTQ+O9D@w`SOs<>g6y^!zTsIAq7`nDA?V8neH{%mZA3rrd zs&s0SSK@oJSy1~vRtM|rD ze|KM)78`wcDf5?KqW3lfOZTD{(=@2uCIBWfvl@E1FcdDy8d_Gi-UoxZ)vo3JDE-VX zqBf=V`DDH0*4Dl@1W1d*h*YTY1adY%r%Jyyb_#;zrk{@yRb<;KW*hHn!)lfqmu_g) z-1zb?QpL2IGrU0A8jpGyFL;|!1Tw=Fpcx9ag?cg-ik-E)2)UScc<#_@Q_Bq7oz&j36 z^=a%=%Ma=1WGG#thv1UsH^N8{4xyxQr#S({5jUr)L{$()xiM~bZab67rO9}WIEetJ zOaw+~nat)Kg&BQA$dr>6YO~xGqY8B$rRhpX*H@I@3jC1l)1TRYY>E2eiwBeJF(PXi z9^`BexWXGlaLw=vwj_;>jqQveOS1K9MhufMSY%TOw!r*R!2;);s=#*QRX4vcXp76) z!>{s6N74>L0D1^blZ2*B1Nx0sa}HIc&9>|-*xyCo@FVA7+^2C+h8JumO{ovWa zr(9gK;r`pl6CdEWfi3T2{s41;WOc%KQL#+XDEmlg)sHmN@98Pq?_4JgeNUPkb-sW5 z_#3ZtOM<9$kQSoNfem<<3$pbBQwpv}ed1W6PrhwKGH%H%p=KCA+<-LhHr3S%Cxq_nP2yF`am=V)9Gzv!9(&YTr~i!>3#Ni3wxWYkxJq|li%YFCWa zaz&<^?r?@aHJ`m=`P&HckX)=`YB_JS-`LzJZ);KG*%qI#$CtNWu-=p$SHR${7Ucg)mJxFWx0l}$_QENKQw;}T-~!{Ca<$~hFkbx9M)vD z#^=%nv3QzXjmZ8SIvM{lOiD@e^_)duThTRhskxVs?PAJFswEA~YzLF>2Irhu#&D?^ zTM}wx3H1WEfBH{99ax;8Njy25d@Whz*PPwm5&XSxOS0}qE$f-BU>*#_B+fyB3`zwA zDP&2kVNJ3!$rfGq{Ht;^2Z7HLPsTJvOniT;7-W7xDYQ|4i`QLE9ivdqN)e5dQ$!iG 
z%!g9Jfyy;kFQBK}*V3q9;>t(?Xrnoi;%I^JU4>wLU<_}kutic3g`-C|Vl4BfT4zYU zqGbGc?6=4xtVtijIbCj*#fEk@iR z0}BDhQm>k(_k7t68H^?nQ6&zzzbeQhH5ony>dO(}Qs`wsSBse|vGEJy{} z^kHve#rll~GmJYGv8uA(#f#>)!%fm9^}pdI$tfWecUjy#9Jtkw0i?>XH_TrfEvH(@ zidm)1K|m>1dZB&!#r^mqBj@6o5;3d5#`i+rEMm_No#-gMe&xe))C}=@-wKJxP)1cM zn{GMhN*Dd~hvhu@C2kWUKG216svg`wKJ|xXLYjH2EFzm-XR|+O`Llz9O||eE{~s0} z04VhF<>#0SEdL|FP#<*M6Mr=l)H{%|ys#;JAo0i?_p6Lf;s-VLG**M^iE6`fiC=H6 z?YzuBM7F+BGcXq|UOtdh4aj*?Fdo$pt^(6}djb8+?qk$avJwFt4Z1=%4LA0Z=(8C0 zEZynjl$acFvNaNUzR)%}l81PJ7>X zHtm|=_8>p9RjtVxuY~33Y`I3gLhiPr>bsM9$o@)a+;T|q){wOH^w3+){TM78|5n{X z!o;xzgLB8kZB1{F-?dMz9=ShmV{9neQEK zCem<=DVDTM`X0#UpE>bI7skt4v>{+#CI{Vm9KiwW>rCntkLFPj#@P<-&YfyAR(QK$ zc3&V-)$XioTowPC-yfD=8Q|ABRje@x5xN_Bpn@FE^D!IE%E{auZ>?Macm>y1{vLzD7G@AMD-;%c8RmWO$vV@T`I9wQyy6D9S27P;b5U} zhEfmL`TCx*v-m*LsFJ`o;Kuz16A)t5EOI7c?=@2mhHL#f$Q_ljbhlVe)WO4QoBHkwp|-IU`mF(vTdI4Sp~C8;91c?|P;jgU0~z zAXO$Hg%$)G`KHl{i}r>C^WWI=HlfAs$KC)H*;grs$yUyp_uuSCFBCVi1>(tR`}ZL$ zt8!B5(k-2(3(n3|x6Ov3ne{-#HWPwgot~oYT1>8%It6tT9cdsGP(-;*oxoRmK=38X_crw-c@gid2U;&Z)tTu!-q1jdbf@^-s82i-WNC|XM#QkW&#q49_j z`*3)(gkVKzsfA|tzVVV3dJ-uqTN+$?#ullGoJTJpjO~dB=M~P{toSucLr_`iJCoZ0 zk$qBrE-DOh-d&*OI-_8BPz>g(`=&=MmOH$fC}Fg9<|{|zm%g5hWj?oF@15dsscg9j zoFfgosUsQFfjk9bqdSd*POdqqr)ehgTB(IY2%O@USpshL{8g1f%1JK=64W0%`45h% zJgOZ$7@ufdtHf2Va~PHVVJR3VZfQaBnDv!83@4lmXpay~;+S$|XdK+sOT~~7IO3(U z{kgI2Y5Y3zv2McUDDJQyKYpjJB%mo!s%;6JmnwwMIROjr5=JNPW^wg+dDCwfl}$C9 zb|GPNzGt@+-@h+EzUY`7{6Y?g)INrW7aWUdWyoexO@`O98N5trwB_70_vt zOFG~lmgwg&q}MZc)BBiTIanlQJ4wJBG{p78bHK+pdu)3t1~ZGTYw`;GxGanU^cinI zpg#Io?PE&?Evt~Rm+jOior17}hxz~m(|`K)*QMVn2cf!m&I_syPvaKt^5vv*bfGDu zBwV~W={_a`c^2cu0S-MV2=mh7%*WpL%I|iS>C2MF3&y~l9{+mlwD3-iF(7!^g_RI& zLRD*gG^)}EWrJX9fZDPxp6^4O0bHzH>siOwskd^$?-j;Er8fAd@894%NY%C2_7~f< zT%^@hA)E@`E<&6?Z#mc}o|(YRX5&u4Z_-4RzPW3uo&j??R@h$iB=}U_=$(07XB`AW zX1>>IPX;Cwu(RT+IR$bZyx@j!_WV)DBHkZ}eJImmpqp>+vP}wY;^V$HG zVe~m#(Y+ygteMVuaoHs81cx&^{nOS`CzL9U46ngaWfqHpk#qc1s`!o3b73l@^VQ6u 
zoF7JC%AB&DaRQlA@%-$+KY9zYT(H@~QJ>33*5puSRHN26%c$+NM-P71$j5{A$R2KC z?0!@4UEHrKG*Ny>h~_$P>&^OZy~zt%)0N9tmsP5P&@&*z{R)j-JkR1BML$nDlv*E5 z2ZD}(sggP!lC<+MWqdLqXsa;13yree?#JVJmmBUhcukjnsQAi=d61qzAJ4Xb<>Hfo zGB(p_q!TywymJ)baIq%$sEy>?gw1%wHR|3C!C{nYIl0s0+hzSHmqz*|W#-C-ZWT!p zWd&(7F*Pz@HTnoI0EHXycwflD#_bqCMhZ1J$bqucIOUxwegfI5hn#(_0VTH5Zmo#( z(Ur(bV5$lE$o)t`r_pMfa)9*ZrJfA!s+&@eE@%dpylX@CvPtb=s=Mi@yW4VV_1E)f zbq>=F$k5lSI1%a*6I)vyu?t;={)%HfT7B}pPKGCnuc|~;9^&FMqzT9LJc@~`vIS=@ zO$Y1?`}PcC{cBq6o0zVPiD5w2KYff0PVlqcOc1XIRFvXhutt6WWqw)}^KWjt!N(ZU zLaU#YIDf-AwpZ%V_K97<`(50l^Bl)9Pq^bZqcWUx5+tbp?j*W>-f7*BoO;)X{n6&S zrq9{p&b?$g?%P;0=B*|)ocZRdB;NqrD7n+jNnE}Z#oMzT8G z?kBW5XFZq}l;YuV>*?0-(;wmzZzugCXV6zibY_3|2jW?ycloF~G@vC)wZ|*mpBv27 ztFUhB)&{wkZV<=5pFKRp`|_$WBFFSj^J+pBHJcgYe?}YYkxaFi(a&M_Az7X<(p!!! ziyaSln9#bfawq-ASWWb+iHR*E`F^S7x+79e@tbi@DLp$UE_{9aA#Xw9S1uo4Z{i@7 zdXbxc>b5c9D&i17N$}eh)C7b$&M>dNz(Z{Xzezm(MdjXWHS>*Ev9#XWkI+WcsKgM~vVFx}$Z|HvWr-?je-DN$4J z@JNR^FX$Ys_d=6%8P&V5JtI%Uxu*6#kpV>O9!Vk#p$Yd2avw{6`%-E=weRfhmh{_*4M*Q?g=SlG`!igT8`&}ms2`pSykT(c?J0JaH}~ZM zICX?Bf&1X1gC;QQ##rDFOX{w_+i1Sx-3oV#N5{j``z1juhK7Onmx6SkAxU4FfFNDh zs2fRT{|rnxZ%L$@Ih^~$@;QGFLq9r%+y`4{h%WE}9rm`*WL5^B5>Qf_m5Kes(tDtT zH-&{WW6}Y^0}`+9AC`p>mcX}`tN?j8wnKc!tO?-OzRXbi!y>=@->zuq(2S{_IQ^p1 zDdf;YAR;~NhM}W!nS27Tb(V`@K&*O51Y^K*5vU3RHMBk6UI%FC! 
zH3`Fu>@Fu@9NrVBjngi2bPCH|)sXi?vSH>w!XN%&srbyzxVqwBF^h}928L_tm#$sd zvpg7lExWCVLY+0d{F?uY?Rl1?Vdpv|4o$yDYc`_2kJo4@`+Go~)*Gj7Er)DtoB7ao zRtKu`Hy;+}V;hNHe)jgXl`))a2P#^8F0m7Od}bM|^x6{;WuBe}<2-D-5`FXDt7A<( zYLoPRl6n`io!^$6Dt&D+3vZSJpREV zJ5&8N{dLzzxK0Qo*|V#d5(Mgi9uax2HnOlEitx&;$ZDFqR&u2#)#^*D&zE9FQ~=YP zxZ@D{>mt|N4+Z-lvT(YUwh(8hhN?DwB@Md5#ATJaWgG00~8z@e&~q|}-d3A#}CQ)a}IZZu1s zhPu}Kn=bO(8o4Vxl{UPXeD@E_wf3;kOgYV3%FqhX^?k7qEzK&KDYh!MhL)lKu%MB% zGgy1Dan2J0_Y}l{BpXRL3L&7_0d&=bgFtoFsG{H0_e&d!WeVQjoK3#BDYox(pGln* zQjb0kTIpSaSTpjahI8(gCw_Cd1_;KNo|%B)Y0nE8wd2ETla%*)!8NrBYEM6z7j6sI z9(J~qjYh8vpr)~~l9d?|s{QO00;lZVawEd}@FsXc+?<1+7Ov{<^dry>Q> zIFt)DnuP0Y5bqi#FNKvhsTa?H-FiN*zHq5Qs%41)Qp+YshwOBr&stLpQf-v^&uV1# zK04W;+k(cVqM$9kOq6%@_23@Anx@S@WWyFMjRE6;PU$=(<#Yfed~w(oVRBi;Cz!*O z(I|Gj&!om0uI{cHHmH2x!RkkxNRS)BJx@f011)r*WHhF*+kACqYuGG%d_y{0=)r=M z-l5c}ilf5hLPmCz@mDLlOEP8YmhEPAhHi8ImoRI1ad5;4s85Doa0lTo{k(2{K91~A z2n=gVq3Ymg!t;JtoA~Chj8lB7@4R(Qkk-3sz$(NlbY#UiB=5imr-|FBq~TKIL#F(j z0Vpjla8F5-XsUbWKa~IZ{|f zqvuu42aP1p5-zia*WE(JI;1!B`+rzI-TszCwVVcW_S(kY3rB#W)vG8%AmfDCQ6N`u z0aEDHYGm7OvL-Wv`!Es@2D0&9?B<1!T-;0^@o3!61sE@osnak_3!ux!&fT$ja7K$A zNYq0BK&m{X>GR8LI#dIMEb`~4SrfYa#vhh_TYb8e4lic;0ZEq0w}+&cmTkqb%mSeP z^VBRrcrgm-yrUX{GJu!(-v$2N!M|tluaW$9Gk+cR-<#y`UHms#_@6S|vWG@`t7ge zNLM6)fNT^0>;TbuvdtW%U?lj{`?l3;`=_2aLL9HR7JZmwtM79z^25vZ-_mU68jsnB z007ko+(Oj5d#!oh83 z`o-=})vsP0|+-fDTd!B>1LPWA+n?XUTVexV@jUq zZm-UN zZhbP)EBCjDn1vZ-Du4#l$Rm^UR%kLnn;=oX}B(CVG)Y1i279LM$uz`;?PgX!Lois z94BvN;q9SaGpC2>rX^mq6miphx}tY^|SU_$wUsESrr=GJE^*>Ng(c zn6+Kbhf{_lE=>(}Vb}qzT88_=8>#0KacM4E$5@jm(NqqXI8nEPMH)=US+W}(x{I=5yt+zT}UV|oRUoS_n!?>?--)eT76(p z>w214PO)OWrzfgjdfg#{H!CEj@DBYWJbHJnVdfbU8*I09}q1(;$cB zzseTLX$Of`jiA>3r?`10SJ}Vb^gnXX+3o353i%5-b^h90=adDZTnO$X5J z5PRQt;h7|YHV>7*c2uSD`=)|7%Qb1~xK!_(XT?U5V8l01AYo_uzkGqUnUfgOgAiGY z(O9)HuL-H}b&Djm%6wF1EojG>rE_T|4V?~wh<0qsUvhJL^EOiE@pi*yJ1L(Y@lqJa zoNEL^ntn073#*5AR*zV$Oek(p0=G0B0acx-oRe@U>h6mBN1eDQ z4eE}F(aVM^0fSe(uktIhcbV9n-=u33w?|Am;Wg~RWrHDKP*aeh$GoukCxPq%` 
zfb-^b9N2-hQUDsFO;51*YwHgeZG6x9`t`zx*lx{$2Y<%x90(9)XLW>Eg_)Unln_s& z7A5k1_t2^4ez}tvL8i`Z0>)fJvg<{OVbm(TGwfozJ(7IDz2~x7`+L4rqp&*o#l|O; zVkI%y)vXC41#6jg9PK5A^sG071bel9-_Ey|loOwMwrX}Z#y;pp!hs#}LJ62^#S~*= z$5n7dq6kgDy_P(^T8r_D9T++PMpOUNN~50v;rfZHmuz~iv*Ti+G$TNi)U_QbHwdgC z0A^x7UR&Y4l7FMU&$yRHf4a!4^b?CJl9HP593@i6TW(i1CtzbpiLY^_hqy5C$)?*R zW*0Cz;nl`dYKqp7yGBGojjKXf9^|}opLz!06D6V7*9M%=vtIAcYX_X`w{-E-WpGjP zc(@c(vGV2b-)@i++bq4AbB%F{P|Oc=j-ID=-74da|EtyC!P*`$4wRbKzM#?(d;r+d zTuX#(L`fcP5)g|Za4NU8IAmvtCnyf_>XJG8=YHp%zT?|+ZBcEQ!#(e?W^$e$BG^}F zcmi~;qH^5JZw2KgF{+fCYzwh`ZvlCg2z~ec)yCt-k>^L^*RnsqRQwm4bWo)^B1RfE z*Kw}1M#z)YTisnR_7axB(E`xQ(Z-Tj7v8s8Z6RTL0y0@y zL5W~SFuutCMC|XsmaQ>vOL~SDt~b{ZITeXHl6l z28l0@^s$7LQzO5*>0mKl)y%#v&=_U#Iy_ObT&C|v(6AjfRpQVNycY7&9(UM@~QuD#OaxbVBaOP9kwbftdG)UKeJjQhsqBfoVx zJErA~w^#2_=b38dPxmjjZ4Z)4Ki1iB=@P#0u4BsMBT5Uk%GKlMXyOx61D*QD7Oqc{ z{+-Qj#*VNC102ERaPhA_Bqd~SlwDiQZd zxs)hIb|wLlG$+boLQ1W=>LH*}kPq?n3*a)7p4>k1?T1>q<>P>cv%WSuf=EyBQMw#e z!D|j;ucstv1F?>6AI}!e(4-~u{`L&Cy3||S!!;VV<3&xQA&PyauH;-+qj z86&rws7l(jo)~zeay}21Zbc%$nr0OKU=Frqj0{XyTv-gVrOXni1_uws-`!eZ^ zKec(F#pr>4reSuLZcqc_7x+XOu*MJ%fTtenZ>-t+tomTSR%18++wfagMvyhz>*UKv zl2SUI=mLyJ1Rvdp@u9TkqI-)niIOurp{t&rkzEAwZqO?EI-@%k6mjky>%A^w9TaS1 zX|pc{M84bQ>y%w^z#o?PYonGTAv9e$XhiFJ{+a+tgXG_mRA$>BSb#GswUw^@bW(27 z=+aezDG3Sqk6r`Qo^$4*E1M4oC z!+XoprKO{*U&{3H6WyDK;g56rNa^u#X+kvUdq!lxq!qduTBX?p0Z9ZfCSmgr7#xx( z=Y@xc5VA1ej+UdK3nQx2l4hh|k+z9MBl)@w1>tM4?1Fb+6`e2*ZJjL=Ei(L4DCd^G zdvNqoN_C*R8!EMCka|HWu11u5mPR$gJLD9vVuZyX)(@4`K>5_(D(ukQ5JSg01}+uI zOa0_SUXc4`rAV0{^HOG>2n-}lD*=~D1Zn(WY3_I_LEETpPwZ6l_@|oM;f?BL$LuX@ z)R@#G?+POh$-H~RK|?RYWw{+(MLZh(m$S2tiz#c5mvi*@{Db%Nu$x}oCBUVjpfj6S zNlkCfK3nmVe#k$F?AQU48r714iv*VFdWmI)6n;wHU20}qqqxnn5zL}vqdy-y&QcfN_T*B2vxF{fkN35NvUby`p8 zGmVSxFzgr0F>Pr++9dwfW^&T{+sJSGUP|Rf{j(k%nic1cu_T-DtyNq27t!we6dV+^ zY}@eJUrhSapu(KPV@|=sJ9KI{Tai&e&~`+yJlm`A6(Z?SWsp zU6b019}gsYA^BK5y!yhY_wTz{K(3@hiuITxNMV(Eikhh2-Dqq_H_Pm#_yBtIBi>p% zMRTU=7J=Q?!omGKB@Q+Q$qtudz7BmQwpp%3@cmp>x1I9$La+W7%PQ>wjBwTWVnFWt 
z3#sItyGCGh*^mamA^IVbB|fqd4{&vvQY6=Ktt*65FuwaW5-&Ac<3z@U4*W0ey?0R4 zVYe=dz0kYVh!hb7l&X|Oq>F%nfDnoZ2q6>!fgmL|x`0v@1e7Wz^iTt#BVD>A)F2=| zp@aYlp5J$7zP-P_=bSrt&b|B0nS1>s!{nXe7xJ>+wbr|ywVvmbQmBGtg77JaLS^k5 zl6Ai@W1-l!OQ(BZRhJ{3Ji{|YKo_tMj6zKy#KfIZFc0GOvH{7&j^an?v+Ngq<4aNk&i0MxF zJY>D75MfGIp?%JOGrx(lUr^&CP_3H2wP`1--5Z!O&^~xtTYI3|{J{Cuf*K+J_F zlZg{|(=P9!Y5r!_k$xKSbxrP93T-vNDbt@Xn0ok!?iT#oNg08 zqE#vwtojp?abbh1qGGwqd%sDjqBa&D`1Wpm&a<4@dA;o0%lkxrGXu!!ckX#{CW`Iw~+3MnJc7}4eMiZqI*}w9;4m- zQcU(rYD{N}$ikxxHNp9-P#q`Q!T~|`0I5j(^h@f+MC36y9!PhNFBs(@^t*6&u{p8( zExb0dmCji|)jAtbzvg?kDAgl}DX*OxN_=wnhWPAr+D)y8TcXC08qcIU^4_we={=@2 zJ3t61?h5rb>p=pEz8Q4sSc*yqOkW7Fcce6dh)V5vf%~g3bY8hX2WVpQwZmGnp_B6Z z-*aO1HcD*OzLDp?SFd(oOtQj_0h|`mNVXLS^PW4mKBaMi&yy|4e+Z*TkfGcU66H#| zBGr^BZ}h_Lab-0Nlyp8oG*n6XSHgRv-;12jm%G8f)S;9C)y&LFgGs3nJvNIs;fNk^ z)IGS7p}%KJ8j=z4Dmq{$1bJ|w$A)_Qhj0H~P~1OC6`^nfd_#6J$A~!-t^H#)g#qg& zTva=25$IK`S@+B0Drtv&kK{g2%f+b7&7OB1{w-^T;1y6Yj1sE~Z^u3T{AFfke7O4O z6dL1x;la<|P&bd#p^K8Dd>SA8u?u|(n~9%mR>Z0$j8d%!4~N94CE1+$@XeqTM^-C? zM|XmxDZZst6^bXxsHKUIc=esz7Dg-Il5&w=0KVSs?8708hfR!ci!M((CCgQ5T;_|` zO1@v(1bi2@V~!PDI->tObh`9D!FLUTZg6P?~Z&^+;UVDufsyp|7^XpD@? 
z-~?y^WT=Kj)M5>pb1)XePSAUP)Z%ibY_zm9U9Pd={;ebE7Z@jqQKCc0q*L+Kiq+z= z9F?0`y1`1sS`zJIs#KWS5O(b>d~h0=q@<(l$l`s&Ea|0_uTWBwrS4^3x<#y0X>9$~ zPS11b((urYz3|XE^vKn*_B*hN6gbz_*E7~jPhTa96lXvA-g%=_ckvNTyMoF-hZ3Hz zc`HB2OhMi+A9}9Fg;SBFrx!Qp3%n)#03ew8a>k5~?p&_xzln~X8w3$ev1j05FfFwT z&q%L3x>xjzR1+qRn`s@}bb4ezUc8um_uGY^ABt2IZxrTyrh~UMDI(aAr>O?C+~-&> zDlf^i9c8+3u?v_Ki7m=QAXdjy6tR=#P%V3ffs2mJ&oOX7EFsL9E2Jxvm(bGYGFTAA%}H=va{a zzFuwNgPa3bI^^pGx~OWvZhbhO@GjJVgS zE^$+pA_S|nE5^*}?lRK1`I}g^n|F@I3&{`h=yZ!zbaT8t#qO~~N4u4UE}pja2*LcJ zlMV^|L$~6&8JA8FTrgC-4GxOBhFqcFO7ZnfgV@KNTnk|Z>VOXsr`qaEVhUF>E>{v6 z{QP;HrgPrUsNX_MJVAUsXEDg9I}m8m&pWX`xxjobqzzfVJ{|%hZARw>sYLM&a=|iq ziF+t=L#~ZMzO2+T*?key3cFJKf!eVxXtwid)WbS%xKN@{ug9lg+z^-?IN^e)aZdFF)#)hlqJ?fZ~3MaN99=6UifvG+|BjY(ug!M45S4xCzir9SP46$AiNnox2?AX*_7wjP69Ye0)SFNhRjTgZAz^K`w!bmC& zn8**D-RUJqQQ#y#au!Ku<5~&Dry0U3A1q^AO%&*lX(Yc-_MZ_cE3dChJ`@*f&|tqO zJb0+Os&~Mp`-d)|Y|Zr#U8VEerppMK4nes_EUn<`*dC+K9s6&yL9*4a*NhrQa7$3V z&JUh>cRdy&>SE0mw_lgCFosTL=Ey%tk!hP&m{wDuDq9Vj{j4+SpRp-EvKl`w4Mg!A zjG=zpv7q~9$Z$&B0ycDiBA%kt#XB@k0Hl-Ois-Ku@kPVGNh$E`R>|t0x)1Phb?V)$ z`s9vzH9|c}@j=dK;!hb9OT&YN2%aoKk|c8!fzha=Iz*WWniM83_J2?7i|;axm)ECw z%xK4T3x){2UmR1nI;xNQwgUEDrhF?ks>tZIK{+~$_VYnxTDoVkIGF41%wY!49CIj} zKA?~OxPB}MT!DZ-A})ub+)|f*C?w(WdWBUHufoP}@(76_nSPN=MJU}z&XkL7cK{V-(y;;^b+4ji!Bq8O9$-TtiWTx)I(Im!CKRJ9iivg$(i&8->9_0@S*gQFW% zHBu&!?@|rMfW)4mrTZ~gjZnBOJAQHy%}ZjZl%!&U0o@y-p1N*llAXt$U*q=MN^_I5_ig!!kw-7 zfgoV29Zpba94HryQ<$!*=E`b4xIgIS+AURo&e86M@^-WHXV6Em>_5muS(*f?w@G%b z2B6jq+0iQIv(&r8m_Vk6Y?m@SJ4&1ShsoS9J?X6YnIqGRk3y5rMB-iM-X=NphhzXb z$hJz?L0!|PcGL2i<)_2wP3y&h_>}E!8g>6@6t}0h=CGsR392j$mb_0%%5%Z;hlsi8 z5VR9YDGQn;$eYK*TWDw?lrQ>uZOyQKWfeS5!V;C<^0F$^`HcU138XWFyN=RNQX)5I zHiH-yHpCWn+GbhAUeQvkt@4VKYrLloByf(h1|r5cAAP?5?xDwn{wSJFiD?_?bdb#4 zU=(n>XiCcqPU{eq4;z@ZY&^PON^Y;R(Dz!byfpIU`@%0XsfMM2GY^9&5c~DpF!cbN zz-S|zU6_qGTDm(Z@t_IXOT)T_ZO^+Qaez!~65- z(qeD?NSxG#=N0E z@pGE@8l0NQef%?{-rq=LCy-zi+9VpJNBaa+sPa29nf4f6Jx>I=lucYsxlgoC=ADh$ zz4V~f_ga-Nqr6YKFY|eu4k7cO*%!evfDcX!k|QZ~A%6minj!INdhy`%{TGJ$UO86k 
zCue6KsRx|i=jvu`t&)<7kBM+H*KzvU)4Tz>XKXOt_5mrI(l3s3cPhQ&F+Pp!wO27P z>Ml)4%%%7!OyrxDE*W(|CZm|6L1(D;CBMf(0vP8)K)VxsVPvmJ<`RXMk;U3_a_y!{ zVnuZsTK2qsfM%t-6r927yyOkRa&k91cZ^itSKaVln}0B6yU$)L*aDzQIHXwk z5|4nMNRWbnPTPK}!g6Gj_-e|KwT&f)xd0bnzQ*va??k*H2rg}*Qa;lZm-QJWHT!&G zcAabk39H%(zMk}Of3_b&1ENm?(tFMC_z5wk&Bg)fKz*G^btYBWa@E%Q6VHa)e_d5l zL~){j^-6!)foeU|p4#L(er)<7QRI1PD)#Bt1%M-{{K$&RK;n*4 zWgq%LmthLW7XAlEbFtXGkRC``Y3mjXWkREA!~>%+hoaY=@JKP-k>1r!#+^QS_^Ba|6@+eaDWT;G>!H(=(cYOqO?WXv2v|`CZBYwlefLgz${}{uUxr*Imy>o zdagWYAXn&=%rz#nds>qD<4jpvmtU%1x*UscrJkaF0)viw!F)imHavv&2Njg(ic361 zFIInY6Vi_^Nb~vrlgeqanI(}GYG%eF*YM9V*HWN)(s3{re3>FZgcFQj*;DLD$~#^A zg3_e0>@4-*xpYG{W|M$D`rc@WEVWfCnMd8|^LK8h+e=Nvtja{`nTC$8EiWXp-Xy7b zqta;8AQ+dKimfziD;1zx&N!{%mRX^g=?)O91!G2bWg-PdiItSfju5f2tE7~$g|cAq5~1U+o>Z~o4=otqKcf%t{83Nl1>TENf#tN9i9F)QnHACR`OcqXX;%ZjrDrJ%ifYJ zG7+8_XJ)#fqV)&g~(n={zL+q z`~V2MRqe7`$id*vrES8a>j?WLEw=&pVfT{WF+~LrI>l@%FT{neKimI88pAgF^O7lGMwv^{7P9o;zux(gMj&ZSgKu|a!ORy7V9jW zgcxi|mMWxtLQA%HjMiLW#C!-a%_?>>z{a1v-1?aJaI4Lhbr{3rtkx6<5NbZ;R+63R z=S;7-6F(@Kl5BpFPI^;cPViB@p2&ENyOr&*_qQrU z6=7AB_NzV}6cByL<|P016hdh@h_{p~w1BdjvE!lSTalO|Ju22Rt_*J|L_d<+@DPiV z-SSe7zQwS1{z=|^+4*-9>IA#+`emwIcm&jV2rYH09a+WPWgurmJdO@|t)|^GDN`)M zrVpZ4esJ6E_ORgtXsT{}zk?xh5LhLxw&Nj8;e#PjYx4 z^POK}^P=`f_U+~@w#Cm*)M|aI1dz4%x4;2*Yji^`daQbuk{XZAT6*DIVFgF>Q$~nc zx)7JySKLj4#KX2+7+7)~ZSw?5f9E~EaUkYfk+ne;9WOL8MLNJaAzrUk6>#WLRBK|~ z>kt-esuD|n82UWgj(!#A#%6`eP0n>w)yogqa9#N27aAe^rR{REqA84{+?<=HI?t#QujB z9r9c3&H`{+f^`6*wy{F-lcoP*W$0f&?C+=l%fI69&i}Q#zbEdmxB2HO`0Hu^Mw$Qf zBCKhfQ1aytURh*l`G*6xYrFqedq45dXqBCi5DZFe8e|-D* z+@7Kv))~)oj(FF4rxl{|R;j8z`?!hmWw1>8kJNmUC4RhW z>!n$ijrtZLo>Y&yKh}y5V9}B6X1^j^7d0=G!tm^q4m~6c>6#9CI3bUBcWx8BPV~)a zJUzh+&Mwh{b5?&V*K~e-+O+#R^DjyLibbK|Z$KISmlJCFsm1))RI3DfYq+QhEt-E{hnV$(aM zGkc9?BYR;tKRH}1P?^$L>MZ{4Arx`L+EGrThMou~kDrlHcp5-23OvfP2;eyk-oa#b|j2QBRhr-4|Fcy!2&4Za?LyWmCdJ; z;23r9Pev2uiWaX;ItRp&P~=%GRs$tl{l2I+fQCWKON85mw>tDrCYa} zzRe8`MqMZ1V@Az~_17Dpd&7Eq(}p~-uB8{Os0Kg{+j`1y0z!;dia!4N72HL>JRfqr ze(=_*86Q(&R%GC7DmZ 
zy2K3wZr$3}g*>+7M7Dk^|KT)I zbBK%4&#v}NE-IW5FrrUEmUI;xI4j1X5MfIJo|tPK^eln;DxN|M?^Lw6mUZOatb*$G z7QZRzpGi1!X~Ws7LAPBu-~yv1H55>TBTDhc_% zZ>tFKnC7>oqW0cCpz2T9nUy8WE1jGS>2~G_dG7H=QM6k0J=;YVC2y~u-Xq@GH6%S% zCl5!JqX-cfpra^v_bd#ta~a-WV{9NEZ|vYf(mDDN(VXJLnrxFP+hW@DaNZ#C?owxpI6Iss8%mx(>^>wEk8@0?LTqjgb{sq6r?+13|MDI4xzEUgi#5cZW2 zA)*Qbdf&jq3)RAWX!b_#OKtf2c7LtM-8@CSMDiVnGcRvIHawlPvS>UpJrZNZLbT&OAp=8)ye>y%@~ zmFrcYFuGqBz-FzYe+HXaAh=P^e4{)?OzWqT_uXV03D@k3`u(Koh5Uw|nxJ7ylcdbz z)YfvRc1>l1yNxBDU^rksooMQZ+wi=ay@>`;`$Oi?u($h!!Wz4Y(~R-V)n4+0xLBSb9&k+|LZ-9+v^Gm%m;m zee5W^<&69K(VO>N%T`o~piP>dpcv)$?n zFDs_7hVAy8H!zN0)>KKzJOMXn|2=d1hy1V6U7DRHQPX0d7aJB&!09=Dgrs}2wJ`qp z*hy@0uaLWxctK!NdzX0IEf`k`Evnjb8Whbdvej41L186t7dqVBWKFsShU;si{Jx-q z1ya<;NuG#0DO%h;T3*#BRZ#*>2Rgi$d48J0HL+3OnN~#mJcYmW1m=*afT`5{y&B7r z6SE^+a3V2kN9J_M4?zeqt|M5fZANGqqeGxMJRq9YL|h$`FM^(*>0E_RQWtk}5keO( zUix}sX^L>8S;f;en$ossK{!H;>ySfY=%kg$S>L4(UE{G7u52J zyWh{wUQdzQPjDU(e_UYrAS!yvpyVm)d2xJvLWTFzQK0zIv}S&Efx4kVKA%m_R`J-b zSG30syKd*cUE~D?>_*^MO2YjizE<4TNQ^@fJh{@g~}3JR72J4*j8fI(vx<9+zk4`MphXupnXBTsMjz*;*So@6xJKi*!kI0($=bG%Tgb%g%c#FHJW4M)@~iCd}oyNnV~z| zb076MLM<=8UwpJ&LPTyjeL>_A068ZG+DVm0uB~=I+10YPi>tiBn25M%Q5BV6t^f#C zVO+JDsQPz7XObkD1xs14w{E&#H}U}u;T|L)S)eNuBbUofbM*4@9nMP0DBE7?< zVUj|)v%)CP2)oL-cF*W0W@1cDgo5YAZWIM+o?tX4Gbbm3h`y7q>90ayElu#1kYMMn z)LPO$|2XH$nn{@1Ez>wz!|{}T=+X-a8fFQva+^@V$I7oQ-6%5gkT#xy%;=$bN0Mr7 zR>(7jQsTW8M1Wm;el|joi`bc)v(hAlN$7YXwOI$Z7pQU%IPDk1Yrs$*$u#-4q8O?! 
z9HK7FQ>a|C@imL0(`tKt6r(qvc?sdV(-PLPCm^;;4^106^Q*}WLkdm~$ojz3{)S%Y znY~w~iiQtw+rg;#qgJB8j@H#Oj4=Eg}2j0AyXwK?9E zb@@!0`p=u;WN|%tE-oHVAptdvn25|iKp1OU5OF}!_KAy<7@ zI>?vy0Xu(_Lh@Qbs9qpq39_p=ON*&zOS9Hs&o>p8H zY`tPkfoPDfwo9U*Kv&;I{BM{$AM}y6^Y;vkhKw17or`6U_CGOosoW^mj%%C6!P}7R zRQntk+}%}^{^H+0YeV{?wUI)xe2M5}o>MF<%J&!VaW;!j-o6BOB02yJ(4j~2znl0o z+dd%V+O1NkLa91ozIZ8}hqWVf`Z2df9&Ngpu#G!E_f$4>nGqcje%#}K4Lv_Q8P^Fp zgE5|uW1$!TotbIx)^md2-^1a#514n{IpTzKCBJg41k;o@^ z!P1mWOHaBW0ZQb#cOLFrOQTe%%h~Mgsq?}$AGKc(+k2g06)ECQJ1ekg^Yd|U)w6(m z1r}*ZkA3bJ%D=SRzQ> zSK)JV$W3fTeT8oNyYlObH}GZeIqBF)<$U_jbTm3N8voFht{sk`CJDUAb|cQ9Q>2uR zS$3eSg#*x3IMs~25G1>8XSG(16Zlp?hRLj!hbd%ax;d~y&!3R)%PqZqs&`rP4y|B| zDn*>_RPLb~k+|Ps0%$1=$;yJ|^Ea~)9+J^T-!`)9hkJkGr)D3C>}WT+VCqcyI`4l< z;y=s}$2zX;%}%m+%rdJ&kub4R(iw$F--aVm*b``swq=ggrT5o-BONs#G@mT{*356g zz7RJlM}Ta|nx0uVp>h%O{(v>-mNy)mtf%TfzL&_k3HG`{_d{P$0ZZ? zi8URfmf9Zn_>3g-7Y18WBWxSsH#|#W07iQf>~@D+v^X9nau{pT{m))LNY3zRP>WCg z>m1@<6ZZOt91GKO%{uV+v|3W7U+Q_)n<0c%^E#??cyJ1r;^fxqjmBlomMJkjnNhm42qG z97zCUn_KG(M8h`idg9nbTnLkvXM}W#bNIb+Vc6~P4`!*um*5Q$IJUkpa zDaqE;3jiEy=aHr~wowv+nmh16#tKFqUd5Yx*UNNzMhJ>g@_mT65Crms-LR^G58QX* zmYW-&d8Tgk{mN0+w^E*Dk$PFXmQzZTeNMt3l5g}~e38GtND8z_XhE`6ZE?Cr6BteB zpEmerTd!t`&!;_nCH1&543-^fF8{g3hxfa%*4^KI-wIoFE@3N{<8~peZ6p<7;^D2b zXOso_a_zW+l;DDKU7S|Nc;Kb$jBovmJf7M~OF}JAFO43=-)(Mx)neE-AJ>B9p&0f9 z&?U6hw0B|J;}6{<(iBXzJ^G{p>=zWjvTmkvpwC%sAo4BaNlv_*_OobaoVOu<8~KYI z)|6^U-uEnhD7ZLx=mS3ny#VeDk{09R>%-gWatq8InP(6>^PVwFBnVa10?9*2o>sjU z2zPML`>j;NYc`(yB-uxc!Jp~dc(3PGoO41STgq8k#Zlyi5XB(9rr4u4#Sl@_0JZ>8 z!N+GnZ%)d8bM{Se_6*_a*fQ7blPWe#t7a+NtB7>R+H`xlTF-=Pipr4!<;bX(0{cQ4 zMqccnN4}9-a!F186znDTm2lgkGPM<_Uledzvue^)HFW&;GMldY84hVgpMje6{?Bkg5cW#r8!iuH!t(qu}0EPH*Cq?^U9~w#ufei5>VIJ)HE_| zTPZ&guQbrpGb8zu~%8Gvl{rEU!A=L0x~P zp?l}3%jMM7Qaa{468CmWC++4rCjg52H(XX{rb}Y-bW{9O;JLqtyQM#6b*c3wh$a;& z3*BffDK8O@e3p6LSX$UlD_5$$L1t}lV$gOTI8*QbFX6fpl@4Sjw(Sp{09A8D)vg4= z-GUp8mN&jMf^OL0!Yvq=ZU3wfuwV+)C0EKZ7T?elPOym)P++S6FW2hk{PYbWsXewO 
zNLbSwk#8$ETlF=1+$xR_+SHx}jTtO3QV|eU3NiBiWInh#ILlt3j*+{FCW~@Mc937l> z*Vwq6$PHOJk-U(Qy4l}q|IIQj6I+(qdBHg-NL?(^;RyD2B}LGVJV>pQOWUe= zFO3aeie!et@tMq`f*~*|zHzR_8l|qSzykdAi`nY%@UX#jo%1;94atHTK;*7%^Qhm) zwW=oAsI0rW#AhDVLC^dSSeM^oTX{C4?obfPU_5=LxTr$=-R8wU<*&bu0D;#K_N#4a zDARdU%7v=B5NXk=a~;DPRfxAOWPjci^7ydRA3Er}=y;34Z9ghrGM1K9`g}vOm24Vh zGz!&>w_!Im3a>A|Z;+RE_UIMjhZjI!ymK28@a1P2ZE~lo*5hWb*$&tR*WEdc6xiTv z$nQzt%6XVxRr#=T|MtkfiOEiV%v8u#PrrL)uL(;nJ7{Sw#(BX&Zhk*Jcg#01v*OVm zQs?%8r`UMBR?Jxr_TKu~&(0Z==1ph8TonJp|2_}$ycVMXumPJ3a=7zu?{qYAl*hUY zKYFy8`8+{dM7LC^Mi!Wk43-PrCERmH)HNVU zx&=Kq^-7nARJv7~pis@aqzO4a8wKID7m~jqFJ~FLVnZMZe#)|8-I!-pTo`EzkB3K$ z^zxPD8B~qrOgi}T!$H7I3r5FzfKTE3@c>w$daUUx;>D3A<>*pm@p zF0Gi8DHlZwevu|_3m;>ho^373|P$)M@b{=Hj)O4592jR++7)PonbPQR=HVi3RaECa4>muHN0Q3)wAFB8{LoDAXlGu4NwOFWXZ-8k0tapB zb74^jbOvM3<2qJCW3HAcK^Jy?b=mi?2zM!-516{#%V~Dq>$6Ws&>I5WhPj{gZnpBy zkK~DC76AhpfR4)f=){=ncgEPE6Wa#FxQ#%sozQDox-N71c8a|ZVUVGps7dkt#rrfX z=*gjw-L0Pj%1D_b3(oE}rm{Tw(Fv^##i{?8DmYv4rC-l|Ues~fJe&AL2li~A; z&++FJh;hxDKT43!P#YyeX2d#`hd}dbwhGP<(8soI@x~a34Y0?DFz(fx5^<9Mlc`tZ8%92k&t=Q{sm+NDaa7a7yO_X@UiDl`@e?B!^B!oz-e#Kf$i%6 zI_9qYt)N*M$ac?HiaQZ+O)_dn^1E6srJcL8#Q2bKqh>o!fY%M+Mk9HAa$;OJr6`%%2=3);Jxq3hdp_rty%e7&&EoVry`{GK z^zI%B87+9JNhnBD`DlXz4Cc~%{6sX@LYx$@wk6-a>H8+Gf-l>BX^S?~caSNz>K}PF zE}!B|5SgVn=cK5}HwoJ4VF3Bw%k$j0U%VqfX^Ah-S8N9@)og-57-rkV+7Q_Xi6RrS z`NJf`d)$T=Lo*hO)#DSUeX)V!Jvq*N+xJTC0=^k$na7~bqb#r;KaP~^`U45R;o%`7 zl)%;zEk9g%r2Ce%yQ8Kl1KjP3>^ad_rw@wWo%*pXaO_0MC&;`?^?y~>2x{TVJoT06 z(NH4mYSl5tVhwOPV8Vvd!9xtp0+@Yfo>r0gt~cmtZr!nh<8C<>MCKgkEW+bTSwbh- zyp#U6+n3n$XpUHss0)@emy$TmgrBOgn)$rErV&00Q{Ll`-``CPuuWVR*@p!VQEXB0 zN_k6s`lF$!=*E%-X~(p9^GQV%x!TBJu)-xP>cHps>nQOrC;xA}F5;SU-iu##y}bBwfsRS9xm22H1hkEmtlC~43DN=>&@Tl+CW6$mL9F1E(sZ>Y z%t(tbJi@WEzV>D4bBUheq5^TtyY=O}rJR(Et`;!&sBgN-oaIWYL2*rrPtx_vVG7cl zcTRs6zxwipMK_W@L>M6lrJxoJbc61X00maC=gDbgtLcJkBZ6q z4ErYQQKxI|D#McXIKQwiOZ!d1(1B`X18swlYqm7$WE5|Uc6V?6!BG!TK-(DKcwV>5 z+-wwEpV`GIv$1c-r0O{r3=VwcdA-DUE#`{WD4TI@0Ng3qd-d*I=1tQr2FG6v`Z9P) 
z4U$qvTtw4(V&@wIl8cfupPQaE{uQ1tm7^E5Zy*sNl~-3ACYB7jhUwpo%3qwamW{hW zaVWF_nrdta?%ZT6QX@8&dP7xcvC9iZ>Dh0$75K52R2Qc5JlgpEjBb4JhkQTu-QP_H z&c+{Jd|SN)LZ67N%_TXJ|Da>I^FISp{<-c2;RD~YqH>Syex=2fP_m(zM`=z{es(P@d}uE zfrY93bKWZ1l9JgKvgJFaxU^0yyOzON$VH4VA@C>|)N*L*T5q0PI0H#?$kF#dt$m?H z%rpMds<9Dw>!|Qby6VnG{0E`MJG#J?xEWvG~T4y?wkhCV5=Il zc`SapwZ3s9=P)*sP|Ap~gQ78w#QjLLuDkXpf(zJ;@yYi$ zI3Zzw=-g>QWw}jIq;W!3XaRZ7RZyV3d^9YsNZncVIR6YaUgL>-D5{TCl`zQO`av~P z{5{?GuZfu)6q^NRCV+*NftD5mT9Je8H(j9wsIpUL+fe13K?gMz3BfVf?n>wadHzjEa; z1mu?vxWR%;|4O!Gks2~Qi%tQCAf03wp{s)8X{M=x8%u+R!|NhVjrEVn2l3~Bu)ObT zQro#_A|iX)*<_VDcLJ#Cw*W=|U%gFNkUy_scNM;RM5a~oZ9lE)^u&43&O@RS?v7xD z7nlX%k@?l-!4C7^*Ksz|?Kib*KTccUsr9$y6pqQTWH51yd3}=ye2s>#;)6?Z*_{WV zcqF?a`fGk970aOGVl}9rKr@@d!fR{jbT!^a(%)aYrr7DHcpJhO^puDNdPkokdaX$0 z%6x>KE(!To9#LhZVf;~@Dcj50s~~{7`gYu~bHcxThBc~QTKj03@W`@6lXGpUfq{JE-G zJ@}SRik_s>qDN7wG&Xv7cx^g~w%h@!ITya+F5gWRo!{aL(vSK>rykwGUK1vSMy^jw zTOTNy1{V?-4$?#?83LC*#qMkSrLL=*0)&asHoMc8DQ{CyMF^h<=<50rk&%SKlt)L# zT2)cgw`N7QuIom3T4{Cjf7s4nM=hkwkOdpI!IE}5b4PJ(6ngc9d%Q)(GQmC`6UQMb z=U|>!lmIT|?60ynKe{vbnk{0CLLucAt-yRO9OQ=NqzYFPJ)=}DbGu;ID_}V9nJ@8A z#nbpT$bsRH(>`gHiv$n*Rk(NjuFERTi>$jA0M;y_@x72wbj@0B&};A?g4%u_h9U7o z(7f4U>=~qsq~|-{>bmS{L}0rh1IZ|i8b115;2BO~oyD;DaV;ZEt(}=8!@96IzNVic z>Tbmc+gEm(v(<{_Ss_E!c5GDGJb+8LgHJWRX}0BxvmEke8Lz4FqI?;ARz2}RQ7=lR zRz3bIpTPHXF9hl{AhgI3PH2!m?d$W8L9Ac#zm;`f?QZzoANulABM-)#Q??2d1DF77 zV`uh~$0rT??o|+2!+6{lt%_koKU&>V7kTpG+vH6{(zfWkJI-V{g&`On`@<@4u0v=`Hf|9-)CX`bztu@24=HVfCX z3m)Ien-APgu=B+Zj^u7Vuls76eoeXnf<3Isgj+3g7+a?Pm*R2&CTF!KJ98V29T9rD z;Hv+4yPh=g=>C_TjJCs3kUfc+c1GTl%GvveuAAcoTI&fdydAR30YG#XGJohqjL9It zRDPHOFj0#&mipli-$QempFtXc=^6m|+Zk^_Tt@4E;VEXB1T6g|Vz!MpHl{#x2Zlpc zC8rqYl(Oq_M462$v-G~#?Rl@`U>4Fw03BX*nWadk{Gp2%KVptR{=NnPfhBgwx_N1a zYtIzi+_Fa8zSh*l2)&->y0xGls&$I)#BaI+IQKeO2tXF|059~q4ta1Tu8b0X{5n + * Example: `kubectl apply -f container-azm-ms-agentconfig.yaml` +4. 
The configuration change can take upto 15 mins to finish before taking effect, and all omsagent pods in the cluster will restart. The restart is a rolling restart for all omsagent pods, not all restart at the same time. + + +## Validate the metrics flow +1. Query cluster's Log Analytics workspace InsightsMetrics table to see metrics are flowing or not +``` +InsightsMetrics +| where Name contains "envoy" +| summarize count() by Name +``` + +## How to consume OSM monitoring dashboard? +1. Access your AKS cluster & Container Insights through this [link.](https://aka.ms/azmon/osmux) +2. Go to reports tab and access Open Service Mesh (OSM) workbook. +3. Select the time-range & namespace to scope your services. By default, we only show services deployed by customers and we exclude internal service communication. In case you want to view that you select Show All in the filter. Please note OSM is managed service mesh, we show all internal connections for transparency. + +![alt text](https://github.com/microsoft/Docker-Provider/blob/saarorOSMdoc/Documentation/OSMPrivatePreview/Image1.jpg) +### Requests Tab +1. This tab provides you the summary of all the http requests sent via service to service in OSM. +2. You can view all the services and all the services it is communicating to by selecting the service in grid. +3. You can view total requests, request error rate & P90 latency. +4. You can drill-down to destination and view trends for HTTP error/success code, success rate, Pods resource utilization, latencies at different percentiles. + +### Connections Tab +1. This tab provides you a summary of all the connections between your services in Open Service Mesh. +2. Outbound connections: Total number of connections between Source and destination services. +3. Outbound active connections: Last count of active connections between source and destination in selected time range. +4. 
Outbound failed connections: Total number of failed connections between source and destination service + +### Troubleshooting guidance when Outbound active connections is 0 or failed connection count is >10k. +1. Please check your connection policy in OSM configuration. +2. If connection policy is fine, please refer the OSM documentation. https://aka.ms/osm/tsg +3. From this view as well, you can drill-down to destination and view trends for HTTP error/success code, success rate, Pods resource utilization, latencies at different percentiles. + + +### Known Issues +1. The workbook has scale limits of 50 pods per namespace. If you have more than 50 pods in mesh you can have workbook loading issues. +2. When source or destination is osmcontroller we show no latency & for internal services we show no resource utilization. +3. When both prometheus scraping using pod annotations and OSM monitoring are enabled on the same set of namespaces, the default set of metrics (envoy_cluster_upstream_cx_total, envoy_cluster_upstream_cx_connect_fail, envoy_cluster_upstream_rq, envoy_cluster_upstream_rq_xx, envoy_cluster_upstream_rq_total, envoy_cluster_upstream_rq_time_bucket, envoy_cluster_upstream_cx_rx_bytes_total, envoy_cluster_upstream_cx_tx_bytes_total, envoy_cluster_upstream_cx_active) will be collected twice. You can follow [this](https://docs.microsoft.com/en-us/azure/azure-monitor/containers/container-insights-prometheus-integration#prometheus-scraping-settings) documentation to exclude these namespaces from pod annotation scraping using the setting monitor_kubernetes_pods_namespaces to work around this issue. + +This is private preview, the goal for us is to get feedback. Please feel free to reach out to us at [askcoin@microsoft.com](mailto:askcoin@microsoft.com) for any feedback and questions! diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 04bd7c6e5..acbd579a0 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,23 @@ additional questions or comments. 
Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 04/22/2021 - +##### Version microsoft/oms:ciprod04222021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021 (linux) +##### Version microsoft/oms:win-ciprod04222021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod04222021 (windows) +##### Code change log +- Bug fixes for metrics cpuUsagePercentage and memoryWorkingSetPercentage for windows nodes +- Added metrics for threshold violation +- Made Job completion metric configurable +- Udated default buffer sizes in fluent-bit +- Updated recommended alerts +- Fixed bug where logs written before agent starts up were not collected +- Fixed bug which kept agent logs from being rotated +- Bug fix for Windows Containerd container log collection +- Bug fixes +- Doc updates +- Minor telemetry changes + + ### 03/26/2021 - ##### Version microsoft/oms:ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021 (linux) ##### Version microsoft/oms:win-ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021 (windows) diff --git a/ReleaseProcess.md b/ReleaseProcess.md index c6f51bb65..8ec91546c 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -35,20 +35,21 @@ Image automatically synched to MCR CN from Public cloud MCR. - Refer to internal docs for the release process and instructions. -## ARO v3 - -This needs to be co-ordinated with Red hat and ARO-RP team for the release and Red hat team will pick up the changes for the release. - ## AKS-Engine Make PR against [AKS-Engine](https://github.com/Azure/aks-engine). 
Refer PR https://github.com/Azure/aks-engine/pull/2318 -## ARO v4, Azure Arc K8s and OpenShift v4 clusters - -Make sure azuremonitor-containers chart yamls updates with all changes going with the release and also make sure to bump the chart version, imagetag and docker provider version etc. Similar to agent container image, build pipeline automatically push the chart to container insights prod acr for canary and prod repos accordingly. -Both the agent and helm chart will be replicated to `mcr.microsoft.com`. +## Arc for Kubernetes -The way, customers will be onboard the monitoring to these clusters using onboarding scripts under `onboarding\managed` directory so please bump chart version for prod release. Once we move to Arc K8s Monitoring extension Public preview, these will be taken care so at that point of time no manual changes like this required. +Ev2 pipeline used to deploy the chart of the Arc K8s Container Insights Extension as per Safe Deployment Process. +Here is the high level process +``` + 1. Specify chart version of the release candidate and trigger [container-insights-arc-k8s-extension-ci_prod-release](https://github-private.visualstudio.com/microsoft/_release?_a=releases&view=all) + 2. Get the approval from one of team member for the release + 3. Once the approved, release should be triggered automatically + 4. use `cimon-arck8s-eastus2euap` for validating latest release in canary region + 5. 
TBD - Notify vendor team for the validation on all Arc K8s supported platforms +``` ## Microsoft Charts Repo release for On-prem K8s diff --git a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf index 720f54820..8a69f7995 100644 --- a/build/linux/installer/conf/td-agent-bit-prom-side-car.conf +++ b/build/linux/installer/conf/td-agent-bit-prom-side-car.conf @@ -11,14 +11,27 @@ Parsers_File /etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf Log_File /var/opt/microsoft/docker-cimprov/log/fluent-bit.log +[INPUT] + Name tail + Tag oms.container.log.flbplugin.terminationlog.* + Path /dev/write-to-traces + Read_from_Head true + DB /var/opt/microsoft/docker-cimprov/state/terminationlog-ai.db + DB.Sync Off + Parser docker + Mem_Buf_Limit 1m + Path_Key filepath + Skip_Long_Lines On + Ignore_Older 2m + [INPUT] Name tcp Tag oms.container.perf.telegraf.* Listen 0.0.0.0 Port 25229 - Chunk_Size 1m - Buffer_Size 1m - Mem_Buf_Limit 20m + Chunk_Size 10m + Buffer_Size 10m + Mem_Buf_Limit 200m [OUTPUT] Name oms diff --git a/build/linux/installer/conf/td-agent-bit-rs.conf b/build/linux/installer/conf/td-agent-bit-rs.conf index 696ac80e6..9613c270d 100644 --- a/build/linux/installer/conf/td-agent-bit-rs.conf +++ b/build/linux/installer/conf/td-agent-bit-rs.conf @@ -10,6 +10,19 @@ Parsers_File /etc/opt/microsoft/docker-cimprov/azm-containers-parser.conf Log_File /var/opt/microsoft/docker-cimprov/log/fluent-bit.log +[INPUT] + Name tail + Tag oms.container.log.flbplugin.terminationlog.* + Path /dev/write-to-traces + Read_from_Head true + DB /var/opt/microsoft/docker-cimprov/state/terminationlog-ai.db + DB.Sync Off + Parser docker + Mem_Buf_Limit 1m + Path_Key filepath + Skip_Long_Lines On + Ignore_Older 2m + [INPUT] Name tcp Tag oms.container.perf.telegraf.* diff --git a/build/linux/installer/conf/td-agent-bit.conf b/build/linux/installer/conf/td-agent-bit.conf index 484a4bbbf..045aefcaf 100644 --- 
a/build/linux/installer/conf/td-agent-bit.conf +++ b/build/linux/installer/conf/td-agent-bit.conf @@ -15,6 +15,7 @@ Name tail Tag oms.container.log.la.* Path ${AZMON_LOG_TAIL_PATH} + Read_from_Head true DB /var/log/omsagent-fblogs.db DB.Sync Off Parser docker @@ -32,6 +33,7 @@ Name tail Tag oms.container.log.flbplugin.* Path /var/log/containers/omsagent*.log + Read_from_Head true DB /var/opt/microsoft/docker-cimprov/state/omsagent-ai.db DB.Sync Off Parser docker @@ -44,6 +46,7 @@ Name tail Tag oms.container.log.flbplugin.mdsd.* Path /var/opt/microsoft/linuxmonagent/log/mdsd.err + Read_from_Head true DB /var/opt/microsoft/docker-cimprov/state/mdsd-ai.db DB.Sync Off Parser docker @@ -52,6 +55,19 @@ Skip_Long_Lines On Ignore_Older 2m +[INPUT] + Name tail + Tag oms.container.log.flbplugin.terminationlog.* + Path /dev/write-to-traces + Read_from_Head true + DB /var/opt/microsoft/docker-cimprov/state/terminationlog-ai.db + DB.Sync Off + Parser docker + Mem_Buf_Limit 1m + Path_Key filepath + Skip_Long_Lines On + Ignore_Older 2m + [INPUT] Name tcp Tag oms.container.perf.telegraf.* diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index a82fa28eb..e3b0fc28e 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -26,6 +26,15 @@ then exit 1 fi +#test to exit non zero value if telegraf is not running +(ps -ef | grep telegraf | grep -v "grep") +if [ $? 
-ne 0 ] +then + echo "Telegraf is not running" > /dev/termination-log + echo "Telegraf is not running (controller: ${CONTROLLER_TYPE}, container type: ${CONTAINER_TYPE})" > /dev/write-to-traces # this file is tailed and sent to traces + exit 1 +fi + if [ -s "inotifyoutput.txt" ] then # inotifyoutput file has data(config map was applied) diff --git a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb b/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb index 345c51633..5ce5d79d2 100644 --- a/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb +++ b/build/linux/installer/scripts/tomlparser-mdm-metrics-config.rb @@ -13,6 +13,7 @@ @percentageMemoryRssThreshold = Constants::DEFAULT_MDM_MEMORY_RSS_THRESHOLD @percentageMemoryWorkingSetThreshold = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD @percentagePVUsageThreshold = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD +@jobCompletionThresholdMinutes = Constants::DEFAULT_MDM_JOB_COMPLETED_TIME_THRESHOLD_MINUTES # Use parser to parse the configmap toml file to a ruby structure def parseConfigMap @@ -101,6 +102,25 @@ def populateSettingValuesFromConfigMap(parsedConfig) ConfigParseErrorLogger.logError("Exception while reading config map settings for MDM metric configuration settings for PV utilization - #{errorStr}, using defaults, please check config map for errors") @percentagePVUsageThreshold = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD end + + # Get mdm metrics config settings for job completion + begin + jobCompletion = parsedConfig[:alertable_metrics_configuration_settings][:job_completion_threshold] + if !jobCompletion.nil? + jobCompletionThreshold = jobCompletion[:job_completion_threshold_time_minutes] + jobCompletionThresholdInt = jobCompletionThreshold.to_i + if jobCompletionThresholdInt.kind_of? 
Integer + @jobCompletionThresholdMinutes = jobCompletionThresholdInt + else + puts "config::Non integer value or value not convertible to integer specified for job completion threshold, using default " + @jobCompletionThresholdMinutes = Constants::DEFAULT_MDM_JOB_COMPLETED_TIME_THRESHOLD_MINUTES + end + puts "config::Using config map settings for MDM metric configuration settings for job completion" + end + rescue => errorStr + ConfigParseErrorLogger.logError("Exception while reading config map settings for MDM metric configuration settings for job completion - #{errorStr}, using defaults, please check config map for errors") + @jobCompletionThresholdMinutes = Constants::DEFAULT_MDM_JOB_COMPLETED_TIME_THRESHOLD_MINUTES + end end end @@ -125,6 +145,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) file.write("export AZMON_ALERT_CONTAINER_MEMORY_RSS_THRESHOLD=#{@percentageMemoryRssThreshold}\n") file.write("export AZMON_ALERT_CONTAINER_MEMORY_WORKING_SET_THRESHOLD=\"#{@percentageMemoryWorkingSetThreshold}\"\n") file.write("export AZMON_ALERT_PV_USAGE_THRESHOLD=#{@percentagePVUsageThreshold}\n") + file.write("export AZMON_ALERT_JOB_COMPLETION_TIME_THRESHOLD=#{@jobCompletionThresholdMinutes}\n") # Close file after writing all MDM setting environment variables file.close puts "****************End MDM Metrics Config Processing********************" diff --git a/build/version b/build/version index 83a0a174b..16a43604a 100644 --- a/build/version +++ b/build/version @@ -2,11 +2,11 @@ # Build Version Information -CONTAINER_BUILDVERSION_MAJOR=14 +CONTAINER_BUILDVERSION_MAJOR=15 CONTAINER_BUILDVERSION_MINOR=0 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20210326 +CONTAINER_BUILDVERSION_DATE=20210422 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml 
index 9c8014ed0..00f3f49ed 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.8.2 +version: 2.8.3 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 8868b86bb..580ef9d15 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -118,6 +118,7 @@ spec: - name: docker-windows-containers hostPath: path: C:\ProgramData\docker\containers + type: DirectoryOrCreate - name: settings-vol-config configMap: name: container-azm-ms-agentconfig diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index fc2a3afaf..219198ebb 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -21,10 +21,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod03262021" - tagWindows: "win-ciprod03262021" + tag: "ciprod04222021" + tagWindows: "win-ciprod04222021" pullPolicy: IfNotPresent - dockerProviderVersion: "14.0.0-0" + dockerProviderVersion: "15.0.0-0" agentVersion: "1.10.0.1" # The priority used by the omsagent priority class for the daemonset pods @@ -124,6 +124,21 @@ omsagent: operator: In values: - amd64 + nodeSelectorTerms: + - labelSelector: + matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + - key: type + operator: NotIn + values: + - virtual-kubelet + - key: kubernetes.io/arch + operator: In + values: + - amd64 nodeSelectorTerms: - labelSelector: matchExpressions: diff --git 
a/deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json b/deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json new file mode 100644 index 000000000..a8a99e9f6 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/Parameters/ContainerInsightsExtension.Parameters.json @@ -0,0 +1,66 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutParameters.json", + "contentVersion": "1.0.0.0", + "wait": [ + { + "name": "waitSdpBakeTime", + "properties": { + "duration": "PT24H" + } + } + ], + "shellExtensions": [ + { + "name": "PushChartToACR", + "type": "ShellExtensionType", + "properties": { + "maxexecutiontime": "PT1H" + }, + "package": { + "reference": { + "path": "artifacts.tar.gz" + } + }, + "launch": { + "command": [ + "/bin/bash", + "pushChartToAcr.sh" + ], + "environmentVariables": [ + { + "name": "RELEASE_STAGE", + "value": "__RELEASE_STAGE__" + }, + { + "name": "ACR_APP_ID", + "reference": { + "provider": "AzureKeyVault", + "parameters": { + "secretId": "https://cibuildandreleasekv.vault.azure.net/secrets/ciprodacrappid/e8f47bf7505741ebaf65a4db16ff9fa7" + } + }, + "asSecureValue": "true" + }, + { + "name": "ACR_APP_SECRET", + "reference": { + "provider": "AzureKeyVault", + "parameters": { + "secretId": "https://cibuildandreleasekv.vault.azure.net/secrets/ciprodacrappsecret/8718afcdac114accb8b26f613cef1e1e" + } + }, + "asSecureValue": "true" + }, + { + "name": "ACR_NAME", + "value": "__ACR_NAME__" + }, + { + "name": "CHART_VERSION", + "value": "__CHART_VERSION__" + } + ] + } + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json new file mode 100644 index 000000000..cde103633 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Canary.RolloutSpec.json @@ 
-0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-Canary", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-Canary", + "actions": [ "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.FF.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.FF.RolloutSpec.json new file mode 100644 index 000000000..1749296c8 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.FF.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-FF", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-FF", + "actions": [ "wait/waitSdpBakeTime", "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.HighLoad.RolloutSpec.json 
b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.HighLoad.RolloutSpec.json new file mode 100644 index 000000000..50729b1ae --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.HighLoad.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-Prod3", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-HighLoad", + "actions": [ "wait/waitSdpBakeTime", "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.LightLoad.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.LightLoad.RolloutSpec.json new file mode 100644 index 000000000..edd61f852 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.LightLoad.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-Prod2", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + 
"targetName": "PushChartToACR-LightLoad", + "actions": [ "wait/waitSdpBakeTime", "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MC.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MC.RolloutSpec.json new file mode 100644 index 000000000..014f4b092 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MC.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-MC", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-MC", + "actions": [ "wait/waitSdpBakeTime", "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MediumLoad.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MediumLoad.RolloutSpec.json new file mode 100644 index 000000000..cd1befbc3 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.MediumLoad.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-Prod2", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { 
+ "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-MediumLoad", + "actions": ["wait/waitSdpBakeTime", "Shell/PushChartToACR" ], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Pilot.RolloutSpec.json b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Pilot.RolloutSpec.json new file mode 100644 index 000000000..48c99fce1 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/RolloutSpecs/Public.Pilot.RolloutSpec.json @@ -0,0 +1,29 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/RolloutSpec.json", + "ContentVersion": "1.0.0.0", + "RolloutMetadata": { + "ServiceModelPath": "ServiceModels//Public.ServiceModel.json", + "ScopeBindingsPath": "ScopeBindings//Public.ScopeBindings.json", + "Name": "ContainerInsightsExtension-Pilot", + "RolloutType": "Major", + "BuildSource": { + "Parameters": { + "VersionFile": "buildver.txt" + } + }, + "notification": { + "email": { + "to": "omscontainers@microsoft.com" + } + } + }, + "orchestratedSteps": [ + { + "name": "PushChartToACR", + "targetType": "ServiceResource", + "targetName": "PushChartToACR-Pilot", + "actions": [ "wait/waitSdpBakeTime", "Shell/PushChartToACR"], + "dependsOn": [ ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json b/deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json new file mode 100644 index 000000000..516eba3e2 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/ScopeBindings/Public.ScopeBindings.json @@ -0,0 +1,125 @@ +{ + "$schema": "https://ev2schema.azure.net/schemas/2020-01-01/scopeBindings.json", + "contentVersion": "0.0.0.1", + "scopeBindings": [ + { + "scopeTagName": "Canary", + 
"bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "Canary" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "Pilot", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "Pilot" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "LightLoad", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "MediumLow" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "MediumLoad", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "MediumHigh" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "HighLoad", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "HighLoad" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "FF", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "FF" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + }, + { + "scopeTagName": "MC", + "bindings": [ + { + "find": "__RELEASE_STAGE__", + "replaceWith": "MC" + }, + { + "find": "__ACR_NAME__", + "replaceWith": "$(ACRName)" + }, + { + "find": "__CHART_VERSION__", + "replaceWith": "$(ChartVersion)" + } + ] + } + ] +} diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh b/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh new file mode 100644 index 
000000000..520557592 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/Scripts/pushChartToAcr.sh @@ -0,0 +1,181 @@ +#!/bin/bash + +export HELM_EXPERIMENTAL_OCI=1 +export MCR_NAME="mcr.microsoft.com" +# for prod-> stable and for test -> preview +export REPO_TYPE="stable" + +# repo paths for arc k8s extension roll-out +# canary region +export CANARY_REGION_REPO_PATH="azuremonitor/containerinsights/canary/${REPO_TYPE}/azuremonitor-containers" +# pilot region +export PILOT_REGION_REPO_PATH="azuremonitor/containerinsights/prod1/${REPO_TYPE}/azuremonitor-containers" +# light load regions +export LIGHT_LOAD_REGION_REPO_PATH="azuremonitor/containerinsights/prod2/${REPO_TYPE}/azuremonitor-containers" +# medium load regions +export MEDIUM_LOAD_REGION_REPO_PATH="azuremonitor/containerinsights/prod3/${REPO_TYPE}/azuremonitor-containers" +# high load regions +export HIGH_LOAD_REGION_REPO_PATH="azuremonitor/containerinsights/prod4/${REPO_TYPE}/azuremonitor-containers" +# FairFax regions +export FF_REGION_REPO_PATH="azuremonitor/containerinsights/prod5/${REPO_TYPE}/azuremonitor-containers" +# Mooncake regions +export MC_REGION_REPO_PATH="azuremonitor/containerinsights/prod6/${REPO_TYPE}/azuremonitor-containers" + +# pull chart from previous stage mcr and push chart to next stage acr +pull_chart_from_source_mcr_to_push_to_dest_acr() { + srcMcrFullPath=${1} + destAcrFullPath=${2} + + if [ -z $srcMcrFullPath ]; then + echo "-e error source mcr path must be provided " + exit 1 + fi + + if [ -z $destAcrFullPath ]; then + echo "-e error dest acr path must be provided " + exit 1 + fi + + echo "Pulling chart from MCR:${srcMcrFullPath} ..." + helm chart pull ${srcMcrFullPath} + if [ $? -eq 0 ]; then + echo "Pulling chart from MCR:${srcMcrFullPath} completed successfully." + else + echo "-e error Pulling chart from MCR:${srcMcrFullPath} failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi + + echo "Exporting chart to current directory ..." 
+ helm chart export ${srcMcrFullPath} + if [ $? -eq 0 ]; then + echo "Exporting chart to current directory completed successfully." + else + echo "-e error Exporting chart to current directory failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi + + echo "save the chart locally with dest acr full path : ${destAcrFullPath} ..." + helm chart save azuremonitor-containers/ ${destAcrFullPath} + if [ $? -eq 0 ]; then + echo "save the chart locally with dest acr full path : ${destAcrFullPath} completed successfully." + else + echo "-e error save the chart locally with dest acr full path : ${destAcrFullPath} failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi + + echo "pushing the chart to acr path: ${destAcrFullPath} ..." + helm chart push ${destAcrFullPath} + if [ $? -eq 0 ]; then + echo "pushing the chart to acr path: ${destAcrFullPath} completed successfully." + else + echo "-e error pushing the chart to acr path: ${destAcrFullPath} failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi +} + +# push to local release candidate chart to canary region +push_local_chart_to_canary_region() { + destAcrFullPath=${1} + if [ -z $destAcrFullPath ]; then + echo "-e error dest acr path must be provided " + exit 1 + fi + + echo "save the chart locally with dest acr full path : ${destAcrFullPath} ..." + helm chart save charts/azuremonitor-containers/ $destAcrFullPath + if [ $? -eq 0 ]; then + echo "save the chart locally with dest acr full path : ${destAcrFullPath} completed." + else + echo "-e error save the chart locally with dest acr full path : ${destAcrFullPath} failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi + + echo "pushing the chart to acr path: ${destAcrFullPath} ..." + helm chart push $destAcrFullPath + if [ $? -eq 0 ]; then + echo "pushing the chart to acr path: ${destAcrFullPath} completed successfully." 
+ else + echo "-e error pushing the chart to acr path: ${destAcrFullPath} failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 + fi +} + +echo "START - Release stage : ${RELEASE_STAGE}" + +# login to acr +echo "Using acr : ${ACR_NAME}" +echo "Using acr repo type: ${REPO_TYPE}" + +echo "login to acr:${ACR_NAME} using helm ..." +echo $ACR_APP_SECRET | helm registry login $ACR_NAME --username $ACR_APP_ID --password-stdin +if [ $? -eq 0 ]; then + echo "login to acr:${ACR_NAME} using helm completed successfully." +else + echo "-e error login to acr:${ACR_NAME} using helm failed. Please review Ev2 pipeline logs for more details on the error." + exit 1 +fi + +case $RELEASE_STAGE in + + Canary) + echo "START: Release stage - Canary" + destAcrFullPath=${ACR_NAME}/public/${CANARY_REGION_REPO_PATH}:${CHART_VERSION} + push_local_chart_to_canary_region $destAcrFullPath + echo "END: Release stage - Canary" + ;; + + Pilot | Prod1) + echo "START: Release stage - Pilot" + srcMcrFullPath=${MCR_NAME}/${CANARY_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${PILOT_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - Pilot" + ;; + + LightLoad | Prod2) + echo "START: Release stage - Light Load Regions" + srcMcrFullPath=${MCR_NAME}/${PILOT_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${LIGHT_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - Light Load Regions" + ;; + + MediumLoad | Prod3) + echo "START: Release stage - Medium Load Regions" + srcMcrFullPath=${MCR_NAME}/${LIGHT_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${MEDIUM_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - Medium Load 
Regions" + ;; + + HighLoad | Prod4) + echo "START: Release stage - High Load Regions" + srcMcrFullPath=${MCR_NAME}/${MEDIUM_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${HIGH_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - High Load Regions" + ;; + + FF | Prod5) + echo "START: Release stage - FF" + srcMcrFullPath=${MCR_NAME}/${HIGH_LOAD_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${FF_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - FF" + ;; + + MC | Prod6) + echo "START: Release stage - MC" + srcMcrFullPath=${MCR_NAME}/${FF_REGION_REPO_PATH}:${CHART_VERSION} + destAcrFullPath=${ACR_NAME}/public/${MC_REGION_REPO_PATH}:${CHART_VERSION} + pull_chart_from_source_mcr_to_push_to_dest_acr $srcMcrFullPath $destAcrFullPath + echo "END: Release stage - MC" + ;; + + *) + echo -n "unknown release stage" + exit 1 + ;; +esac + +echo "END - Release stage : ${RELEASE_STAGE}" diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json b/deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json new file mode 100644 index 000000000..71081661a --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/ServiceModels/Public.ServiceModel.json @@ -0,0 +1,159 @@ +{ + "$schema": "http://schema.express.azure.com/schemas/2015-01-01-alpha/ServiceModel.json", + "ContentVersion": "0.0.0.1", + "ServiceMetadata": { + "ServiceGroup": "ContainerInsightsExtension", + "Environment": "Prod" + }, + "ServiceResourceGroupDefinitions": [ + { + "Name": "ARC-Extension-ServiceResourceGroupDefinition", + "ServiceResourceDefinitions": [ + { + "Name": "ShellExtension", + "ComposedOf": { + "Extension": { + "Shell": [ + { + "type": "ShellExtensionType", + "properties": { + "imageName": 
"adm-ubuntu-1804-l", + "imageVersion": "v18" + } + } + ] + } + } + } + ] + } + ], + "ServiceResourceGroups": [ + { + "AzureResourceGroupName": "ContainerInsightsExtension-Canary-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "Canary" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-Canary", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-Pilot-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "Pilot" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-Pilot", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-LightLoad-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "LightLoad" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-LightLoad", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-MediumLoad-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "MediumLoad" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-MediumLoad", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": 
"Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-HighLoad-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "HighLoad" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-HighLoad", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-FF-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "FF" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-FF", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + }, + { + "AzureResourceGroupName": "ContainerInsightsExtension-MC-Release", + "Location": "eastus2", + "InstanceOf": "ARC-Extension-ServiceResourceGroupDefinition", + "AzureSubscriptionId": "5fab7b6f-6150-42fe-89e1-0f07a0a9a46f", + "ScopeTags": [ + { + "Name": "MC" + } + ], + "ServiceResources": [ + { + "Name": "PushChartToACR-MC", + "InstanceOf": "ShellExtension", + "RolloutParametersPath": "Parameters\\ContainerInsightsExtension.Parameters.json" + } + ] + } + ] + } diff --git a/deployment/arc-k8s-extension/ServiceGroupRoot/buildver.txt b/deployment/arc-k8s-extension/ServiceGroupRoot/buildver.txt new file mode 100644 index 000000000..1921233b3 --- /dev/null +++ b/deployment/arc-k8s-extension/ServiceGroupRoot/buildver.txt @@ -0,0 +1 @@ +1.0.0.0 diff --git a/kubernetes/container-azm-ms-agentconfig.yaml b/kubernetes/container-azm-ms-agentconfig.yaml index e38d9b4ab..543f270c1 100644 --- a/kubernetes/container-azm-ms-agentconfig.yaml +++ 
b/kubernetes/container-azm-ms-agentconfig.yaml @@ -126,6 +126,11 @@ data: [alertable_metrics_configuration_settings.pv_utilization_thresholds] # Threshold for persistent volume usage bytes, metric will be sent only when persistent volume utilization exceeds or becomes equal to the following percentage pv_usage_threshold_percentage = 60.0 + + # Alertable metrics configuration settings for completed jobs count + [alertable_metrics_configuration_settings.job_completion_threshold] + # Threshold for completed job count , metric will be sent only for those jobs which were completed earlier than the following threshold + job_completion_threshold_time_minutes = 360 integrations: |- [integrations.azure_network_policy_manager] collect_basic_metrics = false diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 76b8622b4..d5ece4509 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod03262021 +ARG IMAGE_TAG=ciprod04222021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 71e46875b..81db6f3a4 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -699,6 +699,10 @@ dpkg -l | grep td-agent-bit | awk '{print $2 " " $3}' +# Write messages from the liveness probe to stdout (so telemetry picks it up) +touch /dev/write-to-traces + + echo "stopping rsyslog..." 
service rsyslog stop diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index 218e3c717..ee3756964 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -85,3 +85,7 @@ rm -f $TMPDIR/docker-cimprov*.sh rm -f $TMPDIR/azure-mdsd*.deb rm -f $TMPDIR/mdsd.xml rm -f $TMPDIR/envmdsd + +# Remove settings for cron.daily that conflict with the node's cron.daily. Since both are trying to rotate the same files +# in /var/log at the same time, the rotation doesn't happen correctly and then the *.1 file is forever logged to. +rm /etc/logrotate.d/alternatives /etc/logrotate.d/apt /etc/logrotate.d/azure-mdsd /etc/logrotate.d/rsyslog diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 206d9a8f0..feea3f29a 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "14.0.0-0" + dockerProviderVersion: "15.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" imagePullPolicy: IfNotPresent resources: limits: @@ -446,12 +446,12 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" imagePullPolicy: IfNotPresent resources: limits: cpu: 500m - memory: 400Mi + memory: 1Gi requests: cpu: 75m memory: 225Mi @@ -583,13 +583,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "14.0.0-0" + dockerProviderVersion: "15.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" imagePullPolicy: IfNotPresent resources: limits: @@ -750,7 +750,7 @@ spec: tier: node-win annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "14.0.0-0" + dockerProviderVersion: "15.0.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -760,7 +760,7 @@ spec: value: "3" containers: - name: omsagent-win - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod04222021" imagePullPolicy: IfNotPresent resources: limits: @@ -839,6 +839,7 @@ spec: - name: docker-windows-containers hostPath: path: C:\ProgramData\docker\containers + type: DirectoryOrCreate - name: settings-vol-config configMap: name: container-azm-ms-agentconfig diff --git a/kubernetes/windows/Dockerfile b/kubernetes/windows/Dockerfile index e4ace417a..fefd089a8 100644 --- a/kubernetes/windows/Dockerfile +++ b/kubernetes/windows/Dockerfile @@ -3,7 +3,7 @@ MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=win-ciprod03262021 +ARG IMAGE_TAG=win-ciprod04222021 # Do not split this into multiple RUN! 
# Docker creates a layer for every RUN-Statement diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index ec57bf981..02ea73267 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -46,10 +46,10 @@ param( [Parameter(mandatory = $false)] [string]$kubeContext, [Parameter(mandatory = $false)] - [string]$servicePrincipalClientSecret, - [Parameter(mandatory = $false)] [string]$tenantId, [Parameter(mandatory = $false)] + [string]$kubeContext, + [Parameter(mandatory = $false)] [string]$azureCloudName ) @@ -64,7 +64,7 @@ $isUsingServicePrincipal = $false # released chart version in mcr $mcr = "mcr.microsoft.com" -$mcrChartVersion = "2.8.2" +$mcrChartVersion = "2.8.3" $mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" $helmLocalRepoName = "." $omsAgentDomainName="opinsights.azure.com" diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 9747d932d..f27f944fd 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -44,7 +44,7 @@ defaultAzureCloud="AzureCloud" omsAgentDomainName="opinsights.azure.com" # released chart version in mcr -mcrChartVersion="2.8.2" +mcrChartVersion="2.8.3" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" helmLocalRepoName="." 
diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh index 1cf7b5c97..5456a7072 100644 --- a/scripts/onboarding/managed/upgrade-monitoring.sh +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -20,7 +20,7 @@ set -e set -o pipefail # released chart version for Azure Arc enabled Kubernetes public preview -mcrChartVersion="2.8.2" +mcrChartVersion="2.8.3" mcr="mcr.microsoft.com" mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" diff --git a/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json index 0069307b7..95e7ba5d0 100644 --- a/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json +++ b/scripts/onboarding/templates/arc-k8s-extension/existingClusterOnboarding.json @@ -14,13 +14,6 @@ "description": "Location of the Azure Arc Connected Cluster Resource e.g. \"eastus\"" } }, - "proxyEndpointUrl": { - "type": "string", - "defaultValue": "", - "metadata": { - "description": "If the cluster behind forward proxy, then specify Proxy Endpoint URL in this format: http(s)://:@:" - } - }, "workspaceResourceId": { "type": "string", "metadata": { diff --git a/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json b/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json index b74b5ac95..6829d3d05 100644 --- a/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json +++ b/scripts/onboarding/templates/arc-k8s-extension/existingClusterParam.json @@ -8,9 +8,6 @@ "clusterRegion": { "value": "" }, - "proxyEndpointUrl": { - "value": "" - }, "workspaceResourceId": { "value": "/subscriptions//resourcegroups//providers/microsoft.operationalinsights/workspaces/" }, diff --git a/source/plugins/go/src/telemetry.go b/source/plugins/go/src/telemetry.go index 48f82a9ab..461fdea96 100644 --- 
a/source/plugins/go/src/telemetry.go +++ b/source/plugins/go/src/telemetry.go @@ -153,8 +153,6 @@ func SendContainerLogPluginMetrics(telemetryPushIntervalProperty string) { SendEvent(eventNameCustomPrometheusSidecarHeartbeat, telemetryDimensions) - } else if strings.Compare(strings.ToLower(os.Getenv("OS_TYPE")), "windows") == 0 { - SendEvent(eventNameWindowsFluentBitHeartbeat, make(map[string]string)) } else { SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) flushRateMetric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) diff --git a/source/plugins/ruby/KubernetesApiClient.rb b/source/plugins/ruby/KubernetesApiClient.rb index c5a363741..98347d272 100644 --- a/source/plugins/ruby/KubernetesApiClient.rb +++ b/source/plugins/ruby/KubernetesApiClient.rb @@ -31,6 +31,8 @@ class KubernetesApiClient @@TokenStr = nil @@NodeMetrics = Hash.new @@WinNodeArray = [] + @@telemetryTimeTracker = DateTime.now.to_time.to_i + @@resourceLimitsTelemetryHash = {} def initialize end @@ -403,9 +405,12 @@ def getPodUid(podNameSpace, podMetadata) def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToCollect, metricNametoReturn, metricTime = Time.now.utc.iso8601) metricItems = [] + timeDifference = (DateTime.now.to_time.to_i - @@telemetryTimeTracker).abs + timeDifferenceInMinutes = timeDifference / 60 begin clusterId = getClusterId podNameSpace = pod["metadata"]["namespace"] + podName = pod["metadata"]["name"] podUid = getPodUid(podNameSpace, pod["metadata"]) if podUid.nil? 
return metricItems @@ -456,6 +461,33 @@ def getContainerResourceRequestsAndLimits(pod, metricCategory, metricNameToColle metricProps["Collections"].push(metricCollections) metricItem["DataItems"].push(metricProps) metricItems.push(metricItem) + #Telemetry about omsagent requests and limits + begin + if (podName.downcase.start_with?("omsagent-") && podNameSpace.eql?("kube-system") && containerName.downcase.start_with?("omsagent")) + nodePodContainerKey = [nodeName, podName, containerName, metricNametoReturn].join("~~") + @@resourceLimitsTelemetryHash[nodePodContainerKey] = metricValue + end + if (timeDifferenceInMinutes >= Constants::TELEMETRY_FLUSH_INTERVAL_IN_MINUTES) + @@resourceLimitsTelemetryHash.each { |key, value| + keyElements = key.split("~~") + if keyElements.length != 4 + next + end + + # get dimension values by key + telemetryProps = {} + telemetryProps["Computer"] = keyElements[0] + telemetryProps["PodName"] = keyElements[1] + telemetryProps["ContainerName"] = keyElements[2] + metricNameFromKey = keyElements[3] + ApplicationInsightsUtility.sendMetricTelemetry(metricNameFromKey, value, telemetryProps) + } + @@telemetryTimeTracker = DateTime.now.to_time.to_i + @@resourceLimitsTelemetryHash = {} + end + rescue => errorStr + $log.warn("Exception while generating Telemetry from getContainerResourceRequestsAndLimits failed: #{errorStr} for metric #{metricNameToCollect}") + end #No container level limit for the given metric, so default to node level limit else nodeMetricsHashKey = clusterId + "/" + nodeName + "_" + "allocatable" + "_" + metricNameToCollect @@ -791,7 +823,7 @@ def getKubeAPIServerUrl def getKubeServicesInventoryRecords(serviceList, batchTime = Time.utc.iso8601) kubeServiceRecords = [] begin - if (!serviceList.nil? && !serviceList.empty? && serviceList.key?("items") && !serviceList["items"].nil? && !serviceList["items"].empty? ) + if (!serviceList.nil? && !serviceList.empty? && serviceList.key?("items") && !serviceList["items"].nil? 
&& !serviceList["items"].empty?) servicesCount = serviceList["items"].length @Log.info("KubernetesApiClient::getKubeServicesInventoryRecords : number of services in serviceList #{servicesCount} @ #{Time.now.utc.iso8601}") serviceList["items"].each do |item| diff --git a/source/plugins/ruby/MdmAlertTemplates.rb b/source/plugins/ruby/MdmAlertTemplates.rb index ef63cf219..e889c3f09 100644 --- a/source/plugins/ruby/MdmAlertTemplates.rb +++ b/source/plugins/ruby/MdmAlertTemplates.rb @@ -28,7 +28,7 @@ class MdmAlertTemplates } }' - Stable_job_metrics_template = ' + Stable_job_metrics_template = ' { "time": "%{timestamp}", "data": { @@ -45,7 +45,7 @@ class MdmAlertTemplates "dimValues": [ "%{controllerNameDimValue}", "%{namespaceDimValue}", - "6" + "%{jobCompletionThreshold}" ], "min": %{containerCountMetricValue}, "max": %{containerCountMetricValue}, @@ -90,6 +90,39 @@ class MdmAlertTemplates } }' + Container_resource_threshold_violation_template = ' + { + "time": "%{timestamp}", + "data": { + "baseData": { + "metric": "%{metricName}", + "namespace": "insights.container/containers", + "dimNames": [ + "containerName", + "podName", + "controllerName", + "Kubernetes namespace", + "thresholdPercentage" + ], + "series": [ + { + "dimValues": [ + "%{containerNameDimValue}", + "%{podNameDimValue}", + "%{controllerNameDimValue}", + "%{namespaceDimValue}", + "%{thresholdPercentageDimValue}" + ], + "min": %{containerResourceThresholdViolated}, + "max": %{containerResourceThresholdViolated}, + "sum": %{containerResourceThresholdViolated}, + "count": 1 + } + ] + } + } + }' + PV_resource_utilization_template = ' { "time": "%{timestamp}", @@ -123,6 +156,38 @@ class MdmAlertTemplates } }' + PV_resource_threshold_violation_template = ' + { + "time": "%{timestamp}", + "data": { + "baseData": { + "metric": "%{metricName}", + "namespace": "insights.container/persistentvolumes", + "dimNames": [ + "podName", + "node", + "kubernetesNamespace", + "volumeName", + "thresholdPercentage" + ], + 
"series": [ + { + "dimValues": [ + "%{podNameDimValue}", + "%{computerNameDimValue}", + "%{namespaceDimValue}", + "%{volumeNameDimValue}", + "%{thresholdPercentageDimValue}" + ], + "min": %{pvResourceThresholdViolated}, + "max": %{pvResourceThresholdViolated}, + "sum": %{pvResourceThresholdViolated}, + "count": 1 + } + ] + } + } + }' Node_resource_metrics_template = ' { diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index 12d462e44..199cacd37 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -39,10 +39,21 @@ class MdmMetricsGenerator Constants::MEMORY_WORKING_SET_BYTES => Constants::MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC, } + @@container_metric_name_metric_threshold_violated_hash = { + Constants::CPU_USAGE_MILLI_CORES => Constants::MDM_CONTAINER_CPU_THRESHOLD_VIOLATED_METRIC, + Constants::CPU_USAGE_NANO_CORES => Constants::MDM_CONTAINER_CPU_THRESHOLD_VIOLATED_METRIC, + Constants::MEMORY_RSS_BYTES => Constants::MDM_CONTAINER_MEMORY_RSS_THRESHOLD_VIOLATED_METRIC, + Constants::MEMORY_WORKING_SET_BYTES => Constants::MDM_CONTAINER_MEMORY_WORKING_SET_THRESHOLD_VIOLATED_METRIC, + } + @@pod_metric_name_metric_percentage_name_hash = { Constants::PV_USED_BYTES => Constants::MDM_PV_UTILIZATION_METRIC, } + @@pod_metric_name_metric_threshold_violated_hash = { + Constants::PV_USED_BYTES => Constants::MDM_PV_THRESHOLD_VIOLATED_METRIC, + } + # Setting this to true since we need to send zero filled metrics at startup. 
If metrics are absent alert creation fails @sendZeroFilledMetrics = true @zeroFilledMetricsTimeTracker = DateTime.now.to_time.to_i @@ -96,13 +107,28 @@ def appendPodMetrics(records, metricName, metricHash, batch_time, metricsTemplat podControllerNameDimValue = key_elements[0] podNamespaceDimValue = key_elements[1] - record = metricsTemplate % { - timestamp: batch_time, - metricName: metricName, - controllerNameDimValue: podControllerNameDimValue, - namespaceDimValue: podNamespaceDimValue, - containerCountMetricValue: value, - } + # Special handling for jobs since we need to send the threshold as a dimension as it is configurable + if metricName == Constants::MDM_STALE_COMPLETED_JOB_COUNT + metric_threshold_hash = getContainerResourceUtilizationThresholds + #Converting this to hours since we already have olderThanHours dimension. + jobCompletionThresholdHours = (metric_threshold_hash[Constants::JOB_COMPLETION_TIME] / 60.0).round(2) + record = metricsTemplate % { + timestamp: batch_time, + metricName: metricName, + controllerNameDimValue: podControllerNameDimValue, + namespaceDimValue: podNamespaceDimValue, + containerCountMetricValue: value, + jobCompletionThreshold: jobCompletionThresholdHours, + } + else + record = metricsTemplate % { + timestamp: batch_time, + metricName: metricName, + controllerNameDimValue: podControllerNameDimValue, + namespaceDimValue: podNamespaceDimValue, + containerCountMetricValue: value, + } + end records.push(Yajl::Parser.parse(StringIO.new(record))) } else @@ -129,9 +155,11 @@ def flushPodMdmMetricTelemetry staleJobHashValues = @stale_job_count_hash.values staleJobMetricCount = staleJobHashValues.inject(0) { |sum, x| sum + x } + metric_threshold_hash = getContainerResourceUtilizationThresholds properties["ContainerRestarts"] = containerRestartMetricCount properties["OomKilledContainers"] = oomKilledContainerMetricCount properties["OldCompletedJobs"] = staleJobMetricCount + properties["JobCompletionThesholdTimeInMinutes"] = 
metric_threshold_hash[Constants::JOB_COMPLETION_TIME] ApplicationInsightsUtility.sendCustomEvent(Constants::CONTAINER_METRICS_HEART_BEAT_EVENT, properties) ApplicationInsightsUtility.sendCustomEvent(Constants::POD_READY_PERCENTAGE_HEART_BEAT_EVENT, {}) rescue => errorStr @@ -158,29 +186,63 @@ def zeroFillMetricRecords(records, batch_time) metric_threshold_hash = getContainerResourceUtilizationThresholds container_zero_fill_dims = [Constants::OMSAGENT_ZERO_FILL, Constants::OMSAGENT_ZERO_FILL, Constants::OMSAGENT_ZERO_FILL, Constants::KUBESYSTEM_NAMESPACE_ZERO_FILL].join("~~") - containerCpuRecord = getContainerResourceUtilMetricRecords(batch_time, - Constants::CPU_USAGE_NANO_CORES, - 0, - container_zero_fill_dims, - metric_threshold_hash[Constants::CPU_USAGE_NANO_CORES]) - if !containerCpuRecord.nil? && !containerCpuRecord.empty? && !containerCpuRecord[0].nil? && !containerCpuRecord[0].empty? - records.push(containerCpuRecord[0]) + containerCpuRecords = getContainerResourceUtilMetricRecords(batch_time, + Constants::CPU_USAGE_NANO_CORES, + 0, + container_zero_fill_dims, + metric_threshold_hash[Constants::CPU_USAGE_NANO_CORES], + true) + if !containerCpuRecords.nil? && !containerCpuRecords.empty? + containerCpuRecords.each { |cpuRecord| + if !cpuRecord.nil? && !cpuRecord.empty? + records.push(cpuRecord) + end + } end - containerMemoryRssRecord = getContainerResourceUtilMetricRecords(batch_time, - Constants::MEMORY_RSS_BYTES, - 0, - container_zero_fill_dims, - metric_threshold_hash[Constants::MEMORY_RSS_BYTES]) - if !containerMemoryRssRecord.nil? && !containerMemoryRssRecord.empty? && !containerMemoryRssRecord[0].nil? && !containerMemoryRssRecord[0].empty? - records.push(containerMemoryRssRecord[0]) + containerMemoryRssRecords = getContainerResourceUtilMetricRecords(batch_time, + Constants::MEMORY_RSS_BYTES, + 0, + container_zero_fill_dims, + metric_threshold_hash[Constants::MEMORY_RSS_BYTES], + true) + if !containerMemoryRssRecords.nil? 
&& !containerMemoryRssRecords.empty? + containerMemoryRssRecords.each { |memoryRssRecord| + if !memoryRssRecord.nil? && !memoryRssRecord.empty? + records.push(memoryRssRecord) + end + } end - containerMemoryWorkingSetRecord = getContainerResourceUtilMetricRecords(batch_time, - Constants::MEMORY_WORKING_SET_BYTES, - 0, - container_zero_fill_dims, - metric_threshold_hash[Constants::MEMORY_WORKING_SET_BYTES]) - if !containerMemoryWorkingSetRecord.nil? && !containerMemoryWorkingSetRecord.empty? && !containerMemoryWorkingSetRecord[0].nil? && !containerMemoryWorkingSetRecord[0].empty? - records.push(containerMemoryWorkingSetRecord[0]) + containerMemoryWorkingSetRecords = getContainerResourceUtilMetricRecords(batch_time, + Constants::MEMORY_WORKING_SET_BYTES, + 0, + container_zero_fill_dims, + metric_threshold_hash[Constants::MEMORY_WORKING_SET_BYTES], + true) + if !containerMemoryWorkingSetRecords.nil? && !containerMemoryWorkingSetRecords.empty? + containerMemoryWorkingSetRecords.each { |workingSetRecord| + if !workingSetRecord.nil? && !workingSetRecord.empty? + records.push(workingSetRecord) + end + } + end + + pvZeroFillDims = {} + pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] = Constants::KUBESYSTEM_NAMESPACE_ZERO_FILL + pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = Constants::OMSAGENT_ZERO_FILL + pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_VOLUME_NAME] = Constants::VOLUME_NAME_ZERO_FILL + pvResourceUtilMetricRecords = getPVResourceUtilMetricRecords(batch_time, + Constants::PV_USED_BYTES, + @@hostName, + 0, + pvZeroFillDims, + metric_threshold_hash[Constants::PV_USED_BYTES], + true) + if !pvResourceUtilMetricRecords.nil? && !pvResourceUtilMetricRecords.empty? + pvResourceUtilMetricRecords.each { |pvRecord| + if !pvRecord.nil? && !pvRecord.empty? 
+ records.push(pvRecord) + end + } end pvZeroFillDims = {} @@ -247,7 +309,7 @@ def appendAllPodMetrics(records, batch_time) return records end - def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentageMetricValue, dims, thresholdPercentage) + def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentageMetricValue, dims, thresholdPercentage, isZeroFill = false) records = [] begin if dims.nil? @@ -276,6 +338,19 @@ def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentag thresholdPercentageDimValue: thresholdPercentage, } records.push(Yajl::Parser.parse(StringIO.new(resourceUtilRecord))) + + # Adding another metric for threshold violation + resourceThresholdViolatedRecord = MdmAlertTemplates::Container_resource_threshold_violation_template % { + timestamp: recordTimeStamp, + metricName: @@container_metric_name_metric_threshold_violated_hash[metricName], + containerNameDimValue: containerName, + podNameDimValue: podName, + controllerNameDimValue: controllerName, + namespaceDimValue: podNamespace, + containerResourceThresholdViolated: isZeroFill ? 
0 : 1, + thresholdPercentageDimValue: thresholdPercentage, + } + records.push(Yajl::Parser.parse(StringIO.new(resourceThresholdViolatedRecord))) rescue => errorStr @log.info "Error in getContainerResourceUtilMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) @@ -283,7 +358,7 @@ def getContainerResourceUtilMetricRecords(recordTimeStamp, metricName, percentag return records end - def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percentageMetricValue, dims, thresholdPercentage) + def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percentageMetricValue, dims, thresholdPercentage, isZeroFill = false) records = [] begin containerName = dims[Constants::INSIGHTSMETRICS_TAGS_CONTAINER_NAME] @@ -303,6 +378,19 @@ def getPVResourceUtilMetricRecords(recordTimeStamp, metricName, computer, percen thresholdPercentageDimValue: thresholdPercentage, } records.push(Yajl::Parser.parse(StringIO.new(resourceUtilRecord))) + + # Adding another metric for threshold violation + resourceThresholdViolatedRecord = MdmAlertTemplates::PV_resource_threshold_violation_template % { + timestamp: recordTimeStamp, + metricName: @@pod_metric_name_metric_threshold_violated_hash[metricName], + podNameDimValue: podName, + computerNameDimValue: computer, + namespaceDimValue: pvcNamespace, + volumeNameDimValue: volumeName, + pvResourceThresholdViolated: isZeroFill ? 
0 : 1, + thresholdPercentageDimValue: thresholdPercentage, + } + records.push(Yajl::Parser.parse(StringIO.new(resourceThresholdViolatedRecord))) rescue => errorStr @log.info "Error in getPVResourceUtilMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) @@ -408,6 +496,7 @@ def getContainerResourceUtilizationThresholds metric_threshold_hash[Constants::MEMORY_RSS_BYTES] = Constants::DEFAULT_MDM_MEMORY_RSS_THRESHOLD metric_threshold_hash[Constants::MEMORY_WORKING_SET_BYTES] = Constants::DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD metric_threshold_hash[Constants::PV_USED_BYTES] = Constants::DEFAULT_MDM_PV_UTILIZATION_THRESHOLD + metric_threshold_hash[Constants::JOB_COMPLETION_TIME] = Constants::DEFAULT_MDM_JOB_COMPLETED_TIME_THRESHOLD_MINUTES cpuThreshold = ENV["AZMON_ALERT_CONTAINER_CPU_THRESHOLD"] if !cpuThreshold.nil? && !cpuThreshold.empty? @@ -433,6 +522,12 @@ def getContainerResourceUtilizationThresholds pvUsagePercentageThresholdFloat = (pvUsagePercentageThreshold.to_f).round(2) metric_threshold_hash[Constants::PV_USED_BYTES] = pvUsagePercentageThresholdFloat end + + jobCompletionTimeThreshold = ENV["AZMON_ALERT_JOB_COMPLETION_TIME_THRESHOLD"] + if !jobCompletionTimeThreshold.nil? && !jobCompletionTimeThreshold.empty? 
+ jobCompletionTimeThresholdInt = jobCompletionTimeThreshold.to_i + metric_threshold_hash[Constants::JOB_COMPLETION_TIME] = jobCompletionTimeThresholdInt + end rescue => errorStr @log.info "Error in getContainerResourceUtilizationThresholds: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) diff --git a/source/plugins/ruby/constants.rb b/source/plugins/ruby/constants.rb index cf41900dc..906019b95 100644 --- a/source/plugins/ruby/constants.rb +++ b/source/plugins/ruby/constants.rb @@ -53,6 +53,10 @@ class Constants MDM_CONTAINER_MEMORY_RSS_UTILIZATION_METRIC = "memoryRssExceededPercentage" MDM_CONTAINER_MEMORY_WORKING_SET_UTILIZATION_METRIC = "memoryWorkingSetExceededPercentage" MDM_PV_UTILIZATION_METRIC = "pvUsageExceededPercentage" + MDM_CONTAINER_CPU_THRESHOLD_VIOLATED_METRIC = "cpuThresholdViolated" + MDM_CONTAINER_MEMORY_RSS_THRESHOLD_VIOLATED_METRIC = "memoryRssThresholdViolated" + MDM_CONTAINER_MEMORY_WORKING_SET_THRESHOLD_VIOLATED_METRIC = "memoryWorkingSetThresholdViolated" + MDM_PV_THRESHOLD_VIOLATED_METRIC = "pvUsageThresholdViolated" MDM_NODE_CPU_USAGE_PERCENTAGE = "cpuUsagePercentage" MDM_NODE_MEMORY_RSS_PERCENTAGE = "memoryRssPercentage" MDM_NODE_MEMORY_WORKING_SET_PERCENTAGE = "memoryWorkingSetPercentage" @@ -65,21 +69,22 @@ class Constants MEMORY_WORKING_SET_BYTES = "memoryWorkingSetBytes" MEMORY_RSS_BYTES = "memoryRssBytes" PV_USED_BYTES = "pvUsedBytes" + JOB_COMPLETION_TIME = "completedJobTimeMinutes" DEFAULT_MDM_CPU_UTILIZATION_THRESHOLD = 95.0 DEFAULT_MDM_MEMORY_RSS_THRESHOLD = 95.0 DEFAULT_MDM_MEMORY_WORKING_SET_THRESHOLD = 95.0 DEFAULT_MDM_PV_UTILIZATION_THRESHOLD = 60.0 + DEFAULT_MDM_JOB_COMPLETED_TIME_THRESHOLD_MINUTES = 360 CONTROLLER_KIND_JOB = "job" CONTAINER_TERMINATION_REASON_COMPLETED = "completed" CONTAINER_STATE_TERMINATED = "terminated" - STALE_JOB_TIME_IN_MINUTES = 360 TELEGRAF_DISK_METRICS = "container.azm.ms/disk" OMSAGENT_ZERO_FILL = "omsagent" KUBESYSTEM_NAMESPACE_ZERO_FILL = "kube-system" 
VOLUME_NAME_ZERO_FILL = "-" - PV_TYPES =["awsElasticBlockStore", "azureDisk", "azureFile", "cephfs", "cinder", "csi", "fc", "flexVolume", - "flocker", "gcePersistentDisk", "glusterfs", "hostPath", "iscsi", "local", "nfs", - "photonPersistentDisk", "portworxVolume", "quobyte", "rbd", "scaleIO", "storageos", "vsphereVolume"] + PV_TYPES = ["awsElasticBlockStore", "azureDisk", "azureFile", "cephfs", "cinder", "csi", "fc", "flexVolume", + "flocker", "gcePersistentDisk", "glusterfs", "hostPath", "iscsi", "local", "nfs", + "photonPersistentDisk", "portworxVolume", "quobyte", "rbd", "scaleIO", "storageos", "vsphereVolume"] #Telemetry constants CONTAINER_METRICS_HEART_BEAT_EVENT = "ContainerMetricsMdmHeartBeatEvent" diff --git a/source/plugins/ruby/filter_cadvisor2mdm.rb b/source/plugins/ruby/filter_cadvisor2mdm.rb index 8d7e729c8..659e3000c 100644 --- a/source/plugins/ruby/filter_cadvisor2mdm.rb +++ b/source/plugins/ruby/filter_cadvisor2mdm.rb @@ -9,6 +9,7 @@ module Fluent require_relative "CustomMetricsUtils" require_relative "kubelet_utils" require_relative "MdmMetricsGenerator" + require_relative "in_kube_nodes" class CAdvisor2MdmFilter < Filter Fluent::Plugin.register_filter("filter_cadvisor2mdm", self) @@ -23,6 +24,7 @@ class CAdvisor2MdmFilter < Filter @metrics_to_collect_hash = {} @@metric_threshold_hash = {} + @@controller_type = "" def initialize super @@ -63,6 +65,7 @@ def start @containerResourceDimensionHash = {} @pvUsageHash = {} @@metric_threshold_hash = MdmMetricsGenerator.getContainerResourceUtilizationThresholds + @NodeCache = Fluent::NodeStatsCache.new() end rescue => e @log.info "Error initializing plugin #{e}" @@ -161,19 +164,40 @@ def filter(tag, time, record) if counter_name == Constants::CPU_USAGE_NANO_CORES metric_name = Constants::CPU_USAGE_MILLI_CORES metric_value /= 1000000 #cadvisor record is in nanocores. 
Convert to mc - @log.info "Metric_value: #{metric_value} CPU Capacity #{@cpu_capacity}" - if @cpu_capacity != 0.0 - percentage_metric_value = (metric_value) * 100 / @cpu_capacity + if @@controller_type.downcase == "replicaset" + target_node_cpu_capacity_mc = @NodeCache.cpu.get_capacity(record["DataItems"][0]["Host"]) / 1000000 + else + target_node_cpu_capacity_mc = @cpu_capacity + end + @log.info "Metric_value: #{metric_value} CPU Capacity #{target_node_cpu_capacity_mc}" + if target_node_cpu_capacity_mc != 0.0 + percentage_metric_value = (metric_value) * 100 / target_node_cpu_capacity_mc end end if counter_name.start_with?("memory") metric_name = counter_name - if @memory_capacity != 0.0 - percentage_metric_value = metric_value * 100 / @memory_capacity + if @@controller_type.downcase == "replicaset" + target_node_mem_capacity = @NodeCache.mem.get_capacity(record["DataItems"][0]["Host"]) + else + target_node_mem_capacity = @memory_capacity + end + @log.info "Metric_value: #{metric_value} Memory Capacity #{target_node_mem_capacity}" + if target_node_mem_capacity != 0.0 + percentage_metric_value = metric_value * 100 / target_node_mem_capacity end + end + @log.info "percentage_metric_value for metric: #{metric_name} for instance: #{record["DataItems"][0]["Host"]} percentage: #{percentage_metric_value}" + + # do some sanity checking. Do we want this? 
+ if percentage_metric_value > 100.0 or percentage_metric_value < 0.0 + telemetryProperties = {} + telemetryProperties["Computer"] = record["DataItems"][0]["Host"] + telemetryProperties["MetricName"] = metric_name + telemetryProperties["MetricPercentageValue"] = percentage_metric_value + ApplicationInsightsUtility.sendCustomEvent("ErrorPercentageOutOfBounds", telemetryProperties) end - # return get_metric_records(record, metric_name, metric_value, percentage_metric_value) + return MdmMetricsGenerator.getNodeResourceMetricRecords(record, metric_name, metric_value, percentage_metric_value) elsif object_name == Constants::OBJECT_NAME_K8S_CONTAINER && @metrics_to_collect_hash.key?(counter_name.downcase) instanceName = record["DataItems"][0]["InstanceName"] @@ -279,8 +303,8 @@ def ensure_cpu_memory_capacity_set return end - controller_type = ENV["CONTROLLER_TYPE"] - if controller_type.downcase == "replicaset" + @@controller_type = ENV["CONTROLLER_TYPE"] + if @@controller_type.downcase == "replicaset" @log.info "ensure_cpu_memory_capacity_set @cpu_capacity #{@cpu_capacity} @memory_capacity #{@memory_capacity}" begin @@ -306,7 +330,7 @@ def ensure_cpu_memory_capacity_set @log.info "Error getting memory_capacity" end end - elsif controller_type.downcase == "daemonset" + elsif @@controller_type.downcase == "daemonset" capacity_from_kubelet = KubeletUtils.get_node_capacity # Error handling in case /metrics/cadvsior endpoint fails diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index c057f7c2c..99e804302 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -9,6 +9,7 @@ class Kube_nodeInventory_Input < Input @@MDMKubeNodeInventoryTag = "mdm.kubenodeinventory" @@configMapMountPath = "/etc/config/settings/log-data-collection-settings" @@promConfigMountPath = "/etc/config/settings/prometheus-data-collection-settings" + @@osmConfigMountPath = 
"/etc/config/osm-settings/osm-metric-collection-configuration" @@AzStackCloudFileName = "/etc/kubernetes/host/azurestackcloud.json" @@kubeperfTag = "oms.api.KubePerf" @@ -42,6 +43,8 @@ def initialize @nodeInventoryE2EProcessingLatencyMs = 0 @nodesAPIE2ELatencyMs = 0 require_relative "constants" + + @NodeCache = NodeStatsCache.new() end config_param :run_interval, :time, :default => 60 @@ -196,6 +199,15 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) end end + # Only CPU and Memory capacity for windows nodes get added to the cache (at end of file) + is_windows_node = false + if !item["status"].nil? && !item["status"]["nodeInfo"].nil? && !item["status"]["nodeInfo"]["operatingSystem"].nil? + operatingSystem = item["status"]["nodeInfo"]["operatingSystem"] + if (operatingSystem.is_a?(String) && operatingSystem.casecmp("windows") == 0) + is_windows_node = true + end + end + # node metrics records nodeMetricRecords = [] nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "allocatable", "cpu", "cpuAllocatableNanoCores", batchTime) @@ -209,10 +221,18 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "capacity", "cpu", "cpuCapacityNanoCores", batchTime) if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? nodeMetricRecords.push(nodeMetricRecord) + # add data to the cache so filter_cadvisor2mdm.rb can use it + if is_windows_node + @NodeCache.cpu.set_capacity(nodeMetricRecord["DataItems"][0]["Host"], nodeMetricRecord["DataItems"][0]["Collections"][0]["Value"]) + end end nodeMetricRecord = KubernetesApiClient.parseNodeLimitsFromNodeItem(item, "capacity", "memory", "memoryCapacityBytes", batchTime) if !nodeMetricRecord.nil? && !nodeMetricRecord.empty? 
nodeMetricRecords.push(nodeMetricRecord) + # add data to the cache so filter_cadvisor2mdm.rb can use it + if is_windows_node + @NodeCache.mem.set_capacity(nodeMetricRecord["DataItems"][0]["Host"], nodeMetricRecord["DataItems"][0]["Collections"][0]["Value"]) + end end nodeMetricRecords.each do |metricRecord| metricRecord["DataType"] = "LINUX_PERF_BLOB" @@ -301,6 +321,9 @@ def parse_and_emit_records(nodeInventory, batchTime = Time.utc.iso8601) properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength properties["rsPromMonPodsLabelSelectorLength"] = @@rsPromMonitorPodsLabelSelectorLength properties["rsPromMonPodsFieldSelectorLength"] = @@rsPromMonitorPodsFieldSelectorLength + end + # telemetry about osm metric settings for replicaset + if (File.file?(@@osmConfigMountPath)) properties["osmNamespaceCount"] = @@osmNamespaceCount end ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties) @@ -492,4 +515,77 @@ def getNodeTelemetryProps(item) return properties end end # Kube_Node_Input + + + class NodeStatsCache + # inner class for caching implementation (CPU and memory caching is handled the exact same way, so logic to do so is moved to a private inner class) + # (to reduce code duplication) + class NodeCache + + @@RECORD_TIME_TO_LIVE = 60*20 # units are seconds, so clear the cache every 20 minutes. + + def initialize + @cacheHash = {} + @timeAdded = {} # records when an entry was last added + @lock = Mutex.new + @lastCacheClearTime = 0 + + @cacheHash.default = 0.0 + @lastCacheClearTime = DateTime.now.to_time.to_i + end + + def get_capacity(node_name) + @lock.synchronize do + retval = @cacheHash[node_name] + return retval + end + end + + def set_capacity(host, val) + # check here if the cache has not been cleaned in a while. 
This way calling code doesn't have to remember to clean the cache + current_time = DateTime.now.to_time.to_i + if current_time - @lastCacheClearTime > @@RECORD_TIME_TO_LIVE + clean_cache + @lastCacheClearTime = current_time + end + + @lock.synchronize do + @cacheHash[host] = val + @timeAdded[host] = current_time + end + end + + def clean_cache() + $log.info "in_kube_nodes::clean_cache: cleaning node cpu/mem cache" + cacheClearTime = DateTime.now.to_time.to_i + @lock.synchronize do + nodes_to_remove = [] # first make a list of nodes to remove, then remove them. This intermediate + # list is used so that we aren't modifying a hash while iterating through it. + @cacheHash.each do |key, val| + if cacheClearTime - @timeAdded[key] > @@RECORD_TIME_TO_LIVE + nodes_to_remove.append(key) + end + end + + nodes_to_remove.each do node_name + @cacheHash.delete(node_name) + @timeAdded.delete(node_name) + end + end + end + end # NodeCache + + + @@cpuCache = NodeCache.new + @@memCache = NodeCache.new + + def cpu() + return @@cpuCache + end + + def mem() + return @@memCache + end + end + end # module diff --git a/source/plugins/ruby/podinventory_to_mdm.rb b/source/plugins/ruby/podinventory_to_mdm.rb index 77370e284..d9cb71bd4 100644 --- a/source/plugins/ruby/podinventory_to_mdm.rb +++ b/source/plugins/ruby/podinventory_to_mdm.rb @@ -88,6 +88,7 @@ def initialize() @pod_count_by_phase = {} @pod_uids = {} @process_incoming_stream = CustomMetricsUtils.check_custom_metrics_availability + @metric_threshold_hash = MdmMetricsGenerator.getContainerResourceUtilizationThresholds @log.debug "After check_custom_metrics_availability process_incoming_stream #{@process_incoming_stream}" @log.debug { "Starting podinventory_to_mdm plugin" } end @@ -259,7 +260,7 @@ def process_record_for_terminated_job_metric(podControllerNameDimValue, podNames if !containerFinishedTime.nil? && !containerFinishedTime.empty? 
finishedTimeParsed = Time.parse(containerFinishedTime) # Check to see if job was completed 6 hours ago/STALE_JOB_TIME_IN_MINUTES - if ((Time.now - finishedTimeParsed) / 60) > Constants::STALE_JOB_TIME_IN_MINUTES + if ((Time.now - finishedTimeParsed) / 60) > @metric_threshold_hash[Constants::JOB_COMPLETION_TIME] MdmMetricsGenerator.generateStaleJobCountMetrics(podControllerNameDimValue, podNamespaceDimValue) end From fb43917369ea4cdfd3e54df238fcc076e7d78fd0 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Fri, 23 Apr 2021 10:51:00 -0700 Subject: [PATCH 13/18] David/release 04 22 2021 take2 (#547) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to install docker * fix: Pin to a particular version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug 
message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics (#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod branch * fix links * fix image link * some more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments * Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect 
version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. * add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. 
Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback * Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. 
* Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link * chart update for sept2020 release (#457) * add missing version update in the script (#458) * November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart * remove hiphen for params in chart (#462) Merging as its a simple fix (remove hiphen) * Changes for cutting a new build for ciprod10272020 release (#460) * using latest stable version of msys2 (#465) * fixing the windows-perf-dups (#466) * chart updates related to new microsoft/charts repo (#467) * Changes for creating 11092020 release (#468) * MDM exception aggregation (#470) * grwehner/mdm custom metric regions (#471) Remove custom metrics region check for public cloud * updaitng rs limit to 1gb (#474) * grwehner/pv inventory (#455) Add fluentd plugin to request persistent volume info from the kubernetes api and send to LA * Gangams/fix for build release pipeline issue (#476) * use isolated cdpx acr * correct comment * add pv fluentd plugin config to helm rs config (#477) * add pv fluentd plugin to helm rs config * helm rbac permissions for pv api calls * Gangams/fix rs ooming (#473) * optimize kpi * optimize kube node inventory * add flags for events, deployments and hpa * have separate function parseNodeLimits * refactor code * fix crash * fix bug with service name * fix bugs related to get service name * update oom fix test agent * debug logs * fix service label issue * update to latest agent and enable ephemeral annotation * change stream size to 200 from 250 * update yaml * adjust chunksizes * add ruby gc env * yaml changes for cioomtest11282020-3 * telemetry to track pods latency * service count telemetry * rename variables * wip * nodes inventory telemetry * configmap changes * add emit streams in configmap * yaml updates * fix copy and paste bug * add 
todo comments * fix node latency telemetry bug * update yaml with latest test image * fix bug * upping rs memory change * fix mdm bug with final emit stream * update to latest image * fix pr feedback * fix pr feedback * rename health config to agent config * fix max allowed hpa chunk size * update to use 1k pod chunk since validated on 1.18+ * remove debug logs * minor updates * move defaults to common place * chart updates * final oomfix agent * update to use prod image so that can be validated with build pipeline * fix typo in comment * Gangams/enable arc onboarding to ff (#478) * wip * updates * trigger login if the ctx cloud not same as specified cloud * add missed commit * Convert PV type dictionary to json for telemetry so it shows up in logs (#480) * fix 2 windows tasks - 1) Dont log to termination log 2) enable ADX route for containerlogs in windows (for O365) (#482) * fix ci envvar collection in large pods (#483) * grwehner/jan agent tasks (#481) - Windows agent fix to use log filtering settings in config map. - Error handling for kubelet_utils get_node_capacity in case /metrics/cadvsior endpoint fails. 
- Remove env variable for workspace key for windows agent * updating fbit version and cpu limit (#485) * reverting to older version (#487) * Gangams/add fbsettings configurable via configmap (#486) * wip * fbit config settings * add config warn message * handle one config provided but not other * fixed pr feedback * fix copy paste error * rename config parameter names * fix typo * fix fbit crash in helm path * fix nil check * Gangams/jan agent release tasks (#484) * wip * explicit amd64 affinity for hybrid workloads * fix space issue * wip * revert vscode setting file * remove per container logs in ci (#488) * updates for ciprod01112021 release (#489) * new yaml files (#491) * Use cloud-specific instrumentation keys (#494) If APPLICATIONINSIGHTS_AUTH_URL is set/non-empty then the agent will now grab a custom IKey from a URL stored in APPLICATIONINSIGHTS_AUTH_URL * upgrade apt to latest version (#492) * upgrade apt to latest version * fix pr feedback * Gangams/add support for extension msi for arc k8s cluster (#495) * wip * add env var for the arc k8s extension name * chart update * extension msi updates * fix bug * revert chart and image to prod version * minor text changes * image tag to prod * wip * wip * wip * wip * final updates * fix whitespaces * simplify crd yaml * Gangams/arm template arc k8s extension (#496) * arm templates for arc k8s extension * update to use official extension type name * update * add identity property * add proxyendpointurl parameter * add default values * Gangams/aks monitoring via policy (#497) * enable monitoring through policy * wip * handle tags * wip * add alias * wip * working * updates * working * with deployment name * doc updates * doc updates * fix typo in the docs * revert to use operatingSystem from osImage for node os telemety (#498) * Container log v2 schema changes (#499) * make pod name in mdsd definition as str for consistency. msgp has no type checking, as it has type metadata in it the message itself. 
* Add priority class to the daemonsets (#500) * Add priority class to the daemonsets Add a priority class for omsagent and have the daemonsets use this to be sure to schedule the pods. Daemonset pods are constrained in scheduling to run on specific nodes. This is done by the daemonset controller. When a node shows up it will create a pod with a strong affinity to that node. When a node goes away, it will delete the pod with the node affinity to that node. Kubernetes pod scheduling does not know it is a daemonset but it does know it is tied to a specific node. With default scheduling, it is possible for the pods to be "frozen out" of a node because the node already is full. This can happen because "normal" pods may already exist and are looking for a node to get scheduled on when a node is added to the cluster. The daemonset controller will only first create the pod for the node at around the same time. The kubernetes scheduler is running async from all of this and thus there can be a race as to who gets scheduled on the node. The pod priority class (and thus the pod priority) is a way to indicate that the pod has a higher scheduling priority than a default pod. By default, all pods are at priority 0. Higher numbers are higher priority. Setting the priority to something greater than zero will allow the omsagent daemonsets to win a race against "normal" pods for scheduled resources on a node - and will also allow for graceful eviction in the case the node is too full. Without this, omsagent can be left out of node in clusters that are very busy, especially in dynamic scaling situations. I did not test the windows pod as we have no windows clusters. 
* CR feedback * fix node metric issue (#502) * Bug fixes for Feb release (#504) * bug fix for mdm metrics with no limits * fix exception bug * Gangams/feb 2021 agent bug fix (#505) * fix npe in getKubeServiceRecords * use image fields from spec * fix typo * cover all cases * handle scenario only digest specified * changes for release -ciprod02232021 (#506) * Gangams/e2e test framework (#503) * add agent e2e fw and tests * doc and script updates * add validation script * doc updates * yaml updates * fix typo * doc updates * more doc updates * add ISTEST for helm chart to use arc conf * refactor test code * fix pr feedback * fix pr feedback * fix pr feedback * fix pr feedback * scrape new kubelet pod count metric name (#508) * Adding explicit json output to az commands as the script fails if az is configured with Table output #409 (#513) * Gangams/arc proxy contract and token renewal updates (#511) * fix issue with crd status updates * handle renewal token delays * add proxy contract * updates for proxy cert for linux * remove proxycert related changes * fix whitespace issue * fix whitespace issue * remove proxy in arm template * doc updates for microsoft charts repo release (#512) * doc updates for microsoft charts repo release * wip * Update enable-monitoring.sh (#514) Line 314 and 343 seems to have trailing spaces for some subscriptions which is exiting the script even for valid scenarios Co-authored-by: Ganga Mahesh Siddem * Prometheus scraping from sidecar and OSM changes (#515) * add liveness timeout for exec (#518) * chart and other updates (#519) * Saaror osmdoc (#523) * Create ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Add files via upload * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * telemetry bug fix (#527) * Fix conflicting logrotate settings (#526) The node and the omsagent container both have a cron.daily file to rotate certain logs daily. 
These settings are the same for some files in /var/log (mounted from the node with read/write access), causing the rotation to fail when both try to rotate at the same time. So then the /var/log/*.1 file is written to forever. Since these files are always written to and never rotated, it causes high memory usage on the node after a while. This fix removes the container logrotate settings for /var/log, which the container does not write to. * bug fix (#528) * Gangams/arc ev2 deployment (#522) * ev2 deployment for arc k8s extension * fix charts path issue * rename scripts tar * add notifications * fix line endings * fix line endings * update with prod repo * fix file endings * added liveness and telemetry for telegraf (#517) * added liveness and telemetry for telegraf * code transfer * removed windows liveness probe * done * Windows metric fix (#530) * changes * about to remove container fix * moved caching code to existing loop * removed un-necessary changes * removed a few more un-necessary changes * added windows node check * fixed a bug * everything works confirmed * OSM doc update (#533) * Adding MDM metrics for threshold violation (#531) * Rashmi/april agent 2021 (#538) * add Read_from_Head config for all fluentbit tail plugins (#539) See the commit message of: fluent/fluent-bit@70e33fa for details explaining the fluentbit change and what Read_from_Head does when set to true. 
* fix programdata mount issue on containerd win nodes (#542) * Update sidecar mem limits (#541) * David/release 4 22 2021 (#544) * updating image tag and agent version * updated liveness probe * updated release notes again * fixed date in version file * 1m, 1m, 1s by default (#543) * 1m, 1m, 1s by default * setting default through a different method * getting rid of unwanted changes Co-authored-by: Ganga Mahesh Siddem Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner Co-authored-by: deagraw Co-authored-by: Michael Sinz <36865706+Michael-Sinz@users.noreply.github.com> Co-authored-by: Nicolas Yuen Co-authored-by: seenu433 --- .../scripts/td-agent-bit-conf-customizer.rb | 8 ++-- charts/azuremonitor-containers/values.yaml | 41 ------------------- .../onboarding/managed/enable-monitoring.ps1 | 4 +- source/plugins/go/src/oms.go | 3 -- source/plugins/ruby/MdmMetricsGenerator.rb | 14 ------- 5 files changed, 7 insertions(+), 63 deletions(-) diff --git a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb index 35b71e550..ea1536866 100644 --- a/build/common/installer/scripts/td-agent-bit-conf-customizer.rb +++ b/build/common/installer/scripts/td-agent-bit-conf-customizer.rb @@ -3,7 +3,9 @@ @td_agent_bit_conf_path = "/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf" -@default_service_interval = "15" +@default_service_interval = "1" +@default_buffer_chunk_size = "1" +@default_buffer_max_size = "1" def is_number?(value) true if Integer(value) rescue false @@ -21,9 +23,9 @@ def substituteFluentBitPlaceHolders serviceInterval = (!interval.nil? && is_number?(interval) && interval.to_i > 0 ) ? interval : @default_service_interval serviceIntervalSetting = "Flush " + serviceInterval - tailBufferChunkSize = (!bufferChunkSize.nil? 
&& is_number?(bufferChunkSize) && bufferChunkSize.to_i > 0) ? bufferChunkSize : nil + tailBufferChunkSize = (!bufferChunkSize.nil? && is_number?(bufferChunkSize) && bufferChunkSize.to_i > 0) ? bufferChunkSize : @default_buffer_chunk_size - tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize) && bufferMaxSize.to_i > 0) ? bufferMaxSize : nil + tailBufferMaxSize = (!bufferMaxSize.nil? && is_number?(bufferMaxSize) && bufferMaxSize.to_i > 0) ? bufferMaxSize : @default_buffer_max_size = "1" if ((!tailBufferChunkSize.nil? && tailBufferMaxSize.nil?) || (!tailBufferChunkSize.nil? && !tailBufferMaxSize.nil? && tailBufferChunkSize.to_i > tailBufferMaxSize.to_i)) puts "config:warn buffer max size must be greater or equal to chunk size" diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 219198ebb..9dd5317a4 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -83,47 +83,6 @@ omsagent: affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - labelSelector: - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - - key: type - operator: NotIn - values: - - virtual-kubelet - - key: kubernetes.io/arch - operator: In - values: - - amd64 - nodeSelectorTerms: - - labelSelector: - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - - key: type - operator: NotIn - values: - - virtual-kubelet - nodeSelectorTerms: - - labelSelector: - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - - key: type - operator: NotIn - values: - - virtual-kubelet - - key: kubernetes.io/arch - operator: In - values: - - amd64 nodeSelectorTerms: - labelSelector: matchExpressions: diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 02ea73267..828d061ac 100644 --- 
a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -46,9 +46,9 @@ param( [Parameter(mandatory = $false)] [string]$kubeContext, [Parameter(mandatory = $false)] - [string]$tenantId, + [string]$workspaceResourceId, [Parameter(mandatory = $false)] - [string]$kubeContext, + [string]$proxyEndpoint, [Parameter(mandatory = $false)] [string]$azureCloudName ) diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index 0be806838..d35acad3d 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -919,9 +919,6 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } else { stringMap["AzureResourceId"] = "" } - stringMap["PodName"] = k8sPodName - stringMap["PodNamespace"] = k8sNamespace - stringMap["ContainerName"] = containerName dataItemADX = DataItemADX{ TimeGenerated: stringMap["TimeGenerated"], Computer: stringMap["Computer"], diff --git a/source/plugins/ruby/MdmMetricsGenerator.rb b/source/plugins/ruby/MdmMetricsGenerator.rb index 199cacd37..6641456af 100644 --- a/source/plugins/ruby/MdmMetricsGenerator.rb +++ b/source/plugins/ruby/MdmMetricsGenerator.rb @@ -244,20 +244,6 @@ def zeroFillMetricRecords(records, batch_time) end } end - - pvZeroFillDims = {} - pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_PVC_NAMESPACE] = Constants::KUBESYSTEM_NAMESPACE_ZERO_FILL - pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_POD_NAME] = Constants::OMSAGENT_ZERO_FILL - pvZeroFillDims[Constants::INSIGHTSMETRICS_TAGS_VOLUME_NAME] = Constants::VOLUME_NAME_ZERO_FILL - pvResourceUtilMetricRecord = getPVResourceUtilMetricRecords(batch_time, - Constants::PV_USED_BYTES, - @@hostName, - 0, - pvZeroFillDims, - metric_threshold_hash[Constants::PV_USED_BYTES]) - if !pvResourceUtilMetricRecord.nil? && !pvResourceUtilMetricRecord.empty? && !pvResourceUtilMetricRecord[0].nil? && !pvResourceUtilMetricRecord[0].empty? 
- records.push(pvResourceUtilMetricRecord[0]) - end rescue => errorStr @log.info "Error in zeroFillMetricRecords: #{errorStr}" ApplicationInsightsUtility.sendExceptionTelemetry(errorStr) From 643295d9b85efce5123fdae523d848aa8a5e3da0 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Wed, 12 May 2021 17:18:05 -0700 Subject: [PATCH 14/18] David/aad stage 1 release (#557) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to install docker * fix: Pin to a particular version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * 
more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics (#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod branch * fix links * fix image link * some more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments * Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file 
consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. - Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. * add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. 
Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback * Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. 
* Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link * chart update for sept2020 release (#457) * add missing version update in the script (#458) * November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart * remove hiphen for params in chart (#462) Merging as its a simple fix (remove hiphen) * Changes for cutting a new build for ciprod10272020 release (#460) * using latest stable version of msys2 (#465) * fixing the windows-perf-dups (#466) * chart updates related to new microsoft/charts repo (#467) * Changes for creating 11092020 release (#468) * MDM exception aggregation (#470) * grwehner/mdm custom metric regions (#471) Remove custom metrics region check for public cloud * updaitng rs limit to 1gb (#474) * grwehner/pv inventory (#455) Add fluentd plugin to request persistent volume info from the kubernetes api and send to LA * Gangams/fix for build release pipeline issue (#476) * use isolated cdpx acr * correct comment * add pv fluentd plugin config to helm rs config (#477) * add pv fluentd plugin to helm rs config * helm rbac permissions for pv api calls * Gangams/fix rs ooming (#473) * optimize kpi * optimize kube node inventory * add flags for events, deployments and hpa * have separate function parseNodeLimits * refactor code * fix crash * fix bug with service name * fix bugs related to get service name * update oom fix test agent * debug logs * fix service label issue * update to latest agent and enable ephemeral annotation * change stream size to 200 from 250 * update yaml * adjust chunksizes * add ruby gc env * yaml changes for cioomtest11282020-3 * telemetry to track pods latency * service count telemetry * rename variables * wip * nodes inventory telemetry * configmap changes * add emit streams in configmap * yaml updates * fix copy and paste bug * add 
todo comments * fix node latency telemetry bug * update yaml with latest test image * fix bug * upping rs memory change * fix mdm bug with final emit stream * update to latest image * fix pr feedback * fix pr feedback * rename health config to agent config * fix max allowed hpa chunk size * update to use 1k pod chunk since validated on 1.18+ * remove debug logs * minor updates * move defaults to common place * chart updates * final oomfix agent * update to use prod image so that can be validated with build pipeline * fix typo in comment * Gangams/enable arc onboarding to ff (#478) * wip * updates * trigger login if the ctx cloud not same as specified cloud * add missed commit * Convert PV type dictionary to json for telemetry so it shows up in logs (#480) * fix 2 windows tasks - 1) Dont log to termination log 2) enable ADX route for containerlogs in windows (for O365) (#482) * fix ci envvar collection in large pods (#483) * grwehner/jan agent tasks (#481) - Windows agent fix to use log filtering settings in config map. - Error handling for kubelet_utils get_node_capacity in case /metrics/cadvsior endpoint fails. 
- Remove env variable for workspace key for windows agent * updating fbit version and cpu limit (#485) * reverting to older version (#487) * Gangams/add fbsettings configurable via configmap (#486) * wip * fbit config settings * add config warn message * handle one config provided but not other * fixed pr feedback * fix copy paste error * rename config parameter names * fix typo * fix fbit crash in helm path * fix nil check * Gangams/jan agent release tasks (#484) * wip * explicit amd64 affinity for hybrid workloads * fix space issue * wip * revert vscode setting file * remove per container logs in ci (#488) * updates for ciprod01112021 release (#489) * new yaml files (#491) * Use cloud-specific instrumentation keys (#494) If APPLICATIONINSIGHTS_AUTH_URL is set/non-empty then the agent will now grab a custom IKey from a URL stored in APPLICATIONINSIGHTS_AUTH_URL * upgrade apt to latest version (#492) * upgrade apt to latest version * fix pr feedback * Gangams/add support for extension msi for arc k8s cluster (#495) * wip * add env var for the arc k8s extension name * chart update * extension msi updates * fix bug * revert chart and image to prod version * minor text changes * image tag to prod * wip * wip * wip * wip * final updates * fix whitespaces * simplify crd yaml * Gangams/arm template arc k8s extension (#496) * arm templates for arc k8s extension * update to use official extension type name * update * add identity property * add proxyendpointurl parameter * add default values * Gangams/aks monitoring via policy (#497) * enable monitoring through policy * wip * handle tags * wip * add alias * wip * working * updates * working * with deployment name * doc updates * doc updates * fix typo in the docs * revert to use operatingSystem from osImage for node os telemety (#498) * Container log v2 schema changes (#499) * make pod name in mdsd definition as str for consistency. msgp has no type checking, as it has type metadata in it the message itself. 
* Add priority class to the daemonsets (#500) * Add priority class to the daemonsets Add a priority class for omsagent and have the daemonsets use this to be sure to schedule the pods. Daemonset pods are constrained in scheduling to run on specific nodes. This is done by the daemonset controller. When a node shows up it will create a pod with a strong affinity to that node. When a node goes away, it will delete the pod with the node affinity to that node. Kubernetes pod scheduling does not know it is a daemonset but it does know it is tied to a specific node. With default scheduling, it is possible for the pods to be "frozen out" of a node because the node already is full. This can happen because "normal" pods may already exist and are looking for a node to get scheduled on when a node is added to the cluster. The daemonset controller will only first create the pod for the node at around the same time. The kubernetes scheduler is running async from all of this and thus there can be a race as to who gets scheduled on the node. The pod priority class (and thus the pod priority) is a way to indicate that the pod has a higher scheduling priority than a default pod. By default, all pods are at priority 0. Higher numbers are higher priority. Setting the priority to something greater than zero will allow the omsagent daemonsets to win a race against "normal" pods for scheduled resources on a node - and will also allow for graceful eviction in the case the node is too full. Without this, omsagent can be left out of node in clusters that are very busy, especially in dynamic scaling situations. I did not test the windows pod as we have no windows clusters. 
* CR feedback * fix node metric issue (#502) * Bug fixes for Feb release (#504) * bug fix for mdm metrics with no limits * fix exception bug * Gangams/feb 2021 agent bug fix (#505) * fix npe in getKubeServiceRecords * use image fields from spec * fix typo * cover all cases * handle scenario only digest specified * changes for release -ciprod02232021 (#506) * Gangams/e2e test framework (#503) * add agent e2e fw and tests * doc and script updates * add validation script * doc updates * yaml updates * fix typo * doc updates * more doc updates * add ISTEST for helm chart to use arc conf * refactor test code * fix pr feedback * fix pr feedback * fix pr feedback * fix pr feedback * scrape new kubelet pod count metric name (#508) * Adding explicit json output to az commands as the script fails if az is configured with Table output #409 (#513) * Gangams/arc proxy contract and token renewal updates (#511) * fix issue with crd status updates * handle renewal token delays * add proxy contract * updates for proxy cert for linux * remove proxycert related changes * fix whitespace issue * fix whitespace issue * remove proxy in arm template * doc updates for microsoft charts repo release (#512) * doc updates for microsoft charts repo release * wip * Update enable-monitoring.sh (#514) Line 314 and 343 seems to have trailing spaces for some subscriptions which is exiting the script even for valid scenarios Co-authored-by: Ganga Mahesh Siddem * Prometheus scraping from sidecar and OSM changes (#515) * add liveness timeout for exec (#518) * chart and other updates (#519) * Saaror osmdoc (#523) * Create ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Add files via upload * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * telemetry bug fix (#527) * Fix conflicting logrotate settings (#526) The node and the omsagent container both have a cron.daily file to rotate certain logs daily. 
These settings are the same for some files in /var/log (mounted from the node with read/write access), causing the rotation to fail when both try to rotate at the same time. So then the /var/log/*.1 file is written to forever. Since these files are always written to and never rotated, it causes high memory usage on the node after a while. This fix removes the container logrotate settings for /var/log, which the container does not write to. * bug fix (#528) * Gangams/arc ev2 deployment (#522) * ev2 deployment for arc k8s extension * fix charts path issue * rename scripts tar * add notifications * fix line endings * fix line endings * update with prod repo * fix file endings * added liveness and telemetry for telegraf (#517) * added liveness and telemetry for telegraf * code transfer * removed windows liveness probe * done * Windows metric fix (#530) * changes * about to remove container fix * moved caching code to existing loop * removed un-necessary changes * removed a few more un-necessary changes * added windows node check * fixed a bug * everything works confirmed * OSM doc update (#533) * Adding MDM metrics for threshold violation (#531) * Rashmi/april agent 2021 (#538) * add Read_from_Head config for all fluentbit tail plugins (#539) See the commit message of: fluent/fluent-bit@70e33fa for details explaining the fluentbit change and what Read_from_Head does when set to true. 
* fix programdata mount issue on containerd win nodes (#542) * Update sidecar mem limits (#541) * David/release 4 22 2021 (#544) * updating image tag and agent version * updated liveness probe * updated release notes again * fixed date in version file * 1m, 1m, 1s by default (#543) * 1m, 1m, 1s by default * setting default through a different method * update to latest omsagent, add eastus2 to mdsd regions * copied oneagent bits to a CI repository release * mdsd inmem mode * yaml for cl scale test * yaml for cl scale test * reverting dockerProviderVersion version to 15.0.0 * prepping for release (updated image version, dockerProviderVersion, and release notes * container log scaletest yamls * forgot to update image version in chart * fixing windows tag in dockerfile, changing release notes wording * missed windows tag in one more place * forgot to change the windows dockerProviderVersion back * Update ReleaseNotes.md fix image tag in releasenotes * Update ReleaseNotes.md fix imagetag in release notes this time right Co-authored-by: Ganga Mahesh Siddem Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner Co-authored-by: deagraw Co-authored-by: Michael Sinz <36865706+Michael-Sinz@users.noreply.github.com> Co-authored-by: Nicolas Yuen Co-authored-by: seenu433 --- ReleaseNotes.md | 9 ++- .../linux/installer/scripts/livenessprobe.sh | 2 +- build/version | 4 +- kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/envmdsd | 4 ++ kubernetes/linux/main.sh | 2 +- kubernetes/linux/setup.sh | 4 +- kubernetes/omsagent.yaml | 12 ++-- .../400logspersec-2klogentrysize.yaml | 60 +++++++++++++++++++ .../400logspersec-5klogentrysize.yaml | 60 +++++++++++++++++++ .../ci-log-scale-4kpersec-5klogline.yaml | 60 +++++++++++++++++++ 11 files changed, 205 insertions(+), 14 deletions(-) create mode 100644 
test/containerlog-scale-tests/400logspersec-2klogentrysize.yaml create mode 100644 test/containerlog-scale-tests/400logspersec-5klogentrysize.yaml create mode 100644 test/containerlog-scale-tests/ci-log-scale-4kpersec-5klogline.yaml diff --git a/ReleaseNotes.md b/ReleaseNotes.md index acbd579a0..b4c0d6ba4 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,14 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) + +### 05/12/2021 - +##### Version microsoft/oms:ciprod00512021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021 (linux) +##### No Windows changes with this release, win-ciprod04222021 still current. +##### Code change log +- Upgrading oneagent to version 1.8 (only for Linux) +- Enabling oneagent for container logs for East US 2 + ### 04/22/2021 - ##### Version microsoft/oms:ciprod04222021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021 (linux) ##### Version microsoft/oms:win-ciprod04222021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod04222021 (windows) @@ -27,7 +35,6 @@ Note : The agent version(s) below has dates (ciprod), which indicate t - Doc updates - Minor telemetry changes - ### 03/26/2021 - ##### Version microsoft/oms:ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod03262021 (linux) ##### Version microsoft/oms:win-ciprod03262021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod03262021 (windows) diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index e3b0fc28e..198b4e87f 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -10,7 +10,7 @@ fi #optionally test to exit non zero value if oneagent is not running if [ -e "/opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2" ]; then - (ps -ef 
| grep "mdsd -l" | grep -v "grep") + (ps -ef | grep "mdsd" | grep -v "grep") if [ $? -ne 0 ] then echo "oneagent is not running" > /dev/termination-log diff --git a/build/version b/build/version index 16a43604a..81bb808f5 100644 --- a/build/version +++ b/build/version @@ -3,10 +3,10 @@ # Build Version Information CONTAINER_BUILDVERSION_MAJOR=15 -CONTAINER_BUILDVERSION_MINOR=0 +CONTAINER_BUILDVERSION_MINOR=1 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 -CONTAINER_BUILDVERSION_DATE=20210422 +CONTAINER_BUILDVERSION_DATE=20210512 CONTAINER_BUILDVERSION_STATUS=Developer_Build #-------------------------------- End of File ----------------------------------- diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index d5ece4509..822e52bc8 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod04222021 +ARG IMAGE_TAG=ciprod05122021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/linux/envmdsd b/kubernetes/linux/envmdsd index e4886012e..3f834bfb8 100644 --- a/kubernetes/linux/envmdsd +++ b/kubernetes/linux/envmdsd @@ -12,3 +12,7 @@ export HOSTNAME_OVERRIDE="${NODE_NAME}" export MDSD_TCMALLOC_RELEASE_FREQ_SEC=1 export MDSD_COMPRESSION_ALGORITHM=LZ4 export SSL_CERT_DIR="/etc/ssl/certs" +# increase the size of msgpack items mdsd will accept, otherwise they will be silently dropped. These values were arbitrairly chosen to be 10 or 100 times larger than the defaults. 
+export MDSD_MSGPACK_ARRAY_SIZE_ITEMS=10000000 +export MDSD_MSGPACK_MAP_SIZE_ITEMS=10000000 +export MDSD_MSGPACK_NESTING_LEVEL=100 diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index 81db6f3a4..f03318ad1 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -581,7 +581,7 @@ if [ ! -e "/etc/config/kube.conf" ] && [ "${CONTAINER_TYPE}" != "PrometheusSidec dpkg -l | grep mdsd | awk '{print $2 " " $3}' echo "starting mdsd ..." - mdsd -l -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & + mdsd -e ${MDSD_LOG}/mdsd.err -w ${MDSD_LOG}/mdsd.warn -o ${MDSD_LOG}/mdsd.info -q ${MDSD_LOG}/mdsd.qos & touch /opt/AZMON_CONTAINER_LOGS_EFFECTIVE_ROUTE_V2 fi diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index ee3756964..f065cc165 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -31,8 +31,8 @@ mv $TMPDIR/omsbundle* $TMPDIR/omsbundle /usr/bin/dpkg -i $TMPDIR/omsbundle/110/omsagent*.deb #/usr/bin/dpkg -i $TMPDIR/omsbundle/100/omsconfig*.deb -#install oneagent - Official bits (10/18) -wget https://github.com/microsoft/Docker-Provider/releases/download/10182020-oneagent/azure-mdsd_1.5.126-build.master.99_x86_64.deb +#install oneagent - Official bits (05/2021) +wget https://github.com/microsoft/Docker-Provider/releases/download/05112021-oneagent/azure-mdsd_1.8.0-build.master.189_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d cp -f $TMPDIR/envmdsd /etc/mdsd.d diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index feea3f29a..bf94490ba 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.0.0-0" + dockerProviderVersion: "15.1.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" imagePullPolicy: IfNotPresent resources: limits: @@ -399,7 +399,7 @@ spec: - name: USER_ASSIGNED_IDENTITY_CLIENT_ID value: "" - name: AZMON_CONTAINERLOGS_ONEAGENT_REGIONS - value: "koreacentral,norwayeast" + value: "koreacentral,norwayeast,eastus2" securityContext: privileged: true ports: @@ -446,7 +446,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" imagePullPolicy: IfNotPresent resources: limits: @@ -583,13 +583,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.0.0-0" + dockerProviderVersion: "15.1.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod04222021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" imagePullPolicy: IfNotPresent resources: limits: diff --git a/test/containerlog-scale-tests/400logspersec-2klogentrysize.yaml b/test/containerlog-scale-tests/400logspersec-2klogentrysize.yaml new file mode 100644 index 000000000..cc3dd5259 --- /dev/null +++ b/test/containerlog-scale-tests/400logspersec-2klogentrysize.yaml @@ -0,0 +1,60 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: logs-400persec-2kentrysize +spec: + parallelism: 1 + completions: 1 + template: + metadata: + name: logs-400persec-2kentrysize + spec: + volumes: + - name: logs-400persec-2kentrysize-scripts-volume + configMap: + name: logs-400persec-test-scripts + containers: + - name: logs-400persec-2kentrysize + image: ubuntu + volumeMounts: + - mountPath: /logs-400persec-test-scripts + name: logs-400persec-2kentrysize-scripts-volume + env: + - 
name: HOME + value: /tmp + command: + - /bin/sh + - -c + - | + echo "scripts in /logs-400persec-test-scripts" + ls -lh /logs-400persec-test-scripts + echo "copy scripts to /tmp" + cp /logs-400persec-test-scripts/*.sh /tmp + echo "apply 'chmod +x' to /tmp/*.sh" + chmod +x /tmp/*.sh + echo "script.sh in /tmp" + ls -lh /tmp + /tmp/script.sh + restartPolicy: Never +--- +apiVersion: v1 +items: +- apiVersion: v1 + data: + script.sh: | + #!/bin/bash + logentry='' + for var in {1..400..1} + do + logentry="${logentry}Test-" + done + for var in {1..200000..1} + do + echo $(date "+%Y/%m/%d %H:%M:%S.%3N") ${var}: $logentry + done + kind: ConfigMap + metadata: + creationTimestamp: null + name: logs-400persec-test-scripts +kind: List +metadata: {} diff --git a/test/containerlog-scale-tests/400logspersec-5klogentrysize.yaml b/test/containerlog-scale-tests/400logspersec-5klogentrysize.yaml new file mode 100644 index 000000000..42188631a --- /dev/null +++ b/test/containerlog-scale-tests/400logspersec-5klogentrysize.yaml @@ -0,0 +1,60 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: logs-400persec-5kentrysize +spec: + parallelism: 1 + completions: 1 + template: + metadata: + name: logs-400persec-5kentrysize + spec: + volumes: + - name: logs-400persec-5kentrysize-scripts-volume + configMap: + name: logs-400persec-5kentrysize-test-scripts + containers: + - name: logs-400persec-5kentrysize + image: ubuntu + volumeMounts: + - mountPath: /logs-400persec-5kentrysize-test-scripts + name: logs-400persec-5kentrysize-scripts-volume + env: + - name: HOME + value: /tmp + command: + - /bin/sh + - -c + - | + echo "scripts in /logs-400persec-5kentrysize-test-scripts" + ls -lh /logs-400persec-5kentrysize-test-scripts + echo "copy scripts to /tmp" + cp /logs-400persec-5kentrysize-test-scripts/*.sh /tmp + echo "apply 'chmod +x' to /tmp/*.sh" + chmod +x /tmp/*.sh + echo "script.sh in /tmp" + ls -lh /tmp + /tmp/script.sh + restartPolicy: Never +--- +apiVersion: v1 +items: +- apiVersion: v1 + 
data: + script.sh: | + #!/bin/bash + logentry='' + for var in {1..1024..1} + do + logentry="${logentry}Test-" + done + for var in {1..200000..1} + do + echo $(date "+%Y/%m/%d %H:%M:%S.%3N") ${var}: $logentry + done + kind: ConfigMap + metadata: + creationTimestamp: null + name: logs-400persec-5kentrysize-test-scripts +kind: List +metadata: {} diff --git a/test/containerlog-scale-tests/ci-log-scale-4kpersec-5klogline.yaml b/test/containerlog-scale-tests/ci-log-scale-4kpersec-5klogline.yaml new file mode 100644 index 000000000..ff619a822 --- /dev/null +++ b/test/containerlog-scale-tests/ci-log-scale-4kpersec-5klogline.yaml @@ -0,0 +1,60 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: ci-log-scale +spec: + parallelism: 1 + completions: 1 + template: + metadata: + name: ci-log-scale + spec: + volumes: + - name: ci-log-scale-scripts-volume + configMap: + name: test-scripts + containers: + - name: ci-log-scale + image: ubuntu + volumeMounts: + - mountPath: /test-scripts + name: ci-log-scale-scripts-volume + env: + - name: HOME + value: /tmp + command: + - /bin/sh + - -c + - | + echo "scripts in /test-scripts" + ls -lh /test-scripts + echo "copy scripts to /tmp" + cp /test-scripts/*.sh /tmp + echo "apply 'chmod +x' to /tmp/*.sh" + chmod +x /tmp/*.sh + echo "script.sh in /tmp" + ls -lh /tmp + /tmp/script.sh + restartPolicy: Never +--- +apiVersion: v1 +items: +- apiVersion: v1 + data: + script.sh: | + #!/bin/bash + logentry='' + for var in {1..1024..1} + do + logentry="${logentry}Test-" + done + for var in {1..200000..1} + do + echo $(date "+%Y/%m/%d %H:%M:%S.%3N") ${var}: $logentry + done + kind: ConfigMap + metadata: + creationTimestamp: null + name: test-scripts +kind: List +metadata: {} From afdf5d21dceac7d27ac56f29d86f2ccf9714fa49 Mon Sep 17 00:00:00 2001 From: David Michelman Date: Thu, 20 May 2021 13:48:21 -0700 Subject: [PATCH 15/18] Release 05-20-2021 (#564) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release 
changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update * Gangams/cluster creation scripts (#414) * onprem k8s script * script updates * scripts for creating non-aks clusters * fix minor text update * updates * script updates * fix * script updates * fix scripts to install docker * fix: Pin to a particular version of ltsc2019 by SHA (#427) * enable collecting npm metrics (optionally) (#425) * enable collecting npm metrics (optionally) * fix default enrichment value * fix adx * Saaror patch 3 (#426) * Create README.MD Creating content for Kubecon lab * Update README.MD * Update README.MD * Gangams/add containerd support to windows agent (#428) * wip * wip * wip * wip * bug fix related to uri * wip * wip * fix bug with ignore cert validation * logic to ignore cert validation * minor * fix minor debug log issue * improve log message * debug message * fix bug with nullorempty check * remove debug statements * refactor parsers * add debug message * clean up * chart updates * fix formatting issues * Gangams/arc k8s metrics (#413) * cluster identity token * wip * fix exception * fix exceptions * fix exception * fix bug * fix bug * minor update * refactor the code * more refactoring * fix bug * typo fix * fix typo * wait for 1min after token renewal request * add proxy support for arc k8s mdm endpoint * avoid additional get call * minor line ending fix * wip * have separate log for arc k8s cluster identity * fix bug on creating crd resource * remove update permission since not required * fixed some bugs * fix pr feedback * remove list since its not required * fix: Reverting back to ltsc2019 tag (#429) * more kubelet metrics 
(#430) * more kubelet metrics * celan up new config * fix nom issue when config is empty (#432) * support multiple docker paths when docker root is updated thru knode (#433) * Gangams/doc and other related updates (#434) * bring back nodeslector changes for windows agent ds * readme updates * chart updates for azure cluster resourceid and region * set cluster region during onboarding for managed clusters * wip * fix for onboarding script * add sp support for the login * update help * add sp support for powershell * script updates for sp login * wip * wip * wip * readme updates * update the links to use ci_prod branch * fix links * fix image link * some more readme updates * add missing serviceprincipal in ps scripts (#435) * fix telemetry bug (#436) * Gangams/readmeupdates non aks 09162020 (#437) * changes for ciprod09162020 non-aks release * fix script to handle cross sub scenario * fix minor comment * fix date in version file * fix pr comments * Gangams/fix weird conflicts (#439) * separate build yamls for ci_prod branch (#415) (#416) * [Merge] dev to prod for ciprod08072020 release (#424) * separate build yamls for ci_prod branch (#415) * re-enable adx path (#420) * Gangams/release changes (#419) * updates related to release * updates related to release * fix the incorrect version * fix pr feedback * fix some typos in the release notes * fix for zero filled metrics (#423) * consolidate windows agent image docker files (#422) * consolidate windows agent image docker files * revert docker file consolidation * revert readme updates * merge back windows dockerfiles * image tag update Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar * fix quote issue for the region (#441) * fix cpucapacity/limit bug (#442) * grwehner/pv-usage-metrics (#431) - Send persistent volume usage and capacity metrics to LA for PVs with PVCs at the pod level; config to include or exclude kube-system namespace. 
- Send PV usage percentage to MDM if over the configurable threshold. - Add PV usage recommended alert template. * add new custom metric regions (#444) * add new custom metric regions * fix commas * add 'Terminating' state (#443) * Gangams/sept agent release tasks (#445) * turnoff mdm nonsupported cluster types * enable validation of server cert for ai ruby http client * add kubelet operations total and total error metrics * node selector label change * label update * wip * wip * wip * revert quotes * grwehner/pv-collect-volume-name (#448) Collect and send the volume name as another tag for pvUsedBytes in InsightsMetrics, so that it can be displayed in the workload workbook. Does not affect the PV MDM metric * Changes for september agent release (#449) Moving from v1beta1 to v1 for health CRD Adding timer for zero filling Adding zero filling for PV metrics * Gangams/arc k8s related scripts, charts and doc updates (#450) * checksum annotations * script update for chart from mcr * chart updates * update chart version to match with chart release * script updates * latest chart updates * version updates for chart release * script updates * script updates * doc updates * doc updates * update comments * fix bug in ps script * fix bug in ps script * minor update * release process updates * use consistent name across scripts * use consistent names * Install CA certs from wireserver (#451) * grwehner/pv-volume-name-in-mdm (#452) Add volume name for PV to mdm dimensions and zero fill it * Release changes for 10052020 release (#453) * Release changes for 10052020 release * remove redundant kubelet metrics as part of PR feedback * Update onboarding_instructions.md (#456) * Update onboarding_instructions.md Updated the documentation to reflect where to update the config map. 
* Update onboarding_instructions.md * Update onboarding_instructions.md * Update onboarding_instructions.md Updated the link * chart update for sept2020 release (#457) * add missing version update in the script (#458) * November release fixes - activate one agent, adx schema v2, win perf issue, syslog deactivation (#459) * activate one agent, adx schema v2, win perf issue, syslog deactivation * update chart * remove hiphen for params in chart (#462) Merging as its a simple fix (remove hiphen) * Changes for cutting a new build for ciprod10272020 release (#460) * using latest stable version of msys2 (#465) * fixing the windows-perf-dups (#466) * chart updates related to new microsoft/charts repo (#467) * Changes for creating 11092020 release (#468) * MDM exception aggregation (#470) * grwehner/mdm custom metric regions (#471) Remove custom metrics region check for public cloud * updaitng rs limit to 1gb (#474) * grwehner/pv inventory (#455) Add fluentd plugin to request persistent volume info from the kubernetes api and send to LA * Gangams/fix for build release pipeline issue (#476) * use isolated cdpx acr * correct comment * add pv fluentd plugin config to helm rs config (#477) * add pv fluentd plugin to helm rs config * helm rbac permissions for pv api calls * Gangams/fix rs ooming (#473) * optimize kpi * optimize kube node inventory * add flags for events, deployments and hpa * have separate function parseNodeLimits * refactor code * fix crash * fix bug with service name * fix bugs related to get service name * update oom fix test agent * debug logs * fix service label issue * update to latest agent and enable ephemeral annotation * change stream size to 200 from 250 * update yaml * adjust chunksizes * add ruby gc env * yaml changes for cioomtest11282020-3 * telemetry to track pods latency * service count telemetry * rename variables * wip * nodes inventory telemetry * configmap changes * add emit streams in configmap * yaml updates * fix copy and paste bug * add 
todo comments * fix node latency telemetry bug * update yaml with latest test image * fix bug * upping rs memory change * fix mdm bug with final emit stream * update to latest image * fix pr feedback * fix pr feedback * rename health config to agent config * fix max allowed hpa chunk size * update to use 1k pod chunk since validated on 1.18+ * remove debug logs * minor updates * move defaults to common place * chart updates * final oomfix agent * update to use prod image so that can be validated with build pipeline * fix typo in comment * Gangams/enable arc onboarding to ff (#478) * wip * updates * trigger login if the ctx cloud not same as specified cloud * add missed commit * Convert PV type dictionary to json for telemetry so it shows up in logs (#480) * fix 2 windows tasks - 1) Dont log to termination log 2) enable ADX route for containerlogs in windows (for O365) (#482) * fix ci envvar collection in large pods (#483) * grwehner/jan agent tasks (#481) - Windows agent fix to use log filtering settings in config map. - Error handling for kubelet_utils get_node_capacity in case /metrics/cadvsior endpoint fails. 
- Remove env variable for workspace key for windows agent * updating fbit version and cpu limit (#485) * reverting to older version (#487) * Gangams/add fbsettings configurable via configmap (#486) * wip * fbit config settings * add config warn message * handle one config provided but not other * fixed pr feedback * fix copy paste error * rename config parameter names * fix typo * fix fbit crash in helm path * fix nil check * Gangams/jan agent release tasks (#484) * wip * explicit amd64 affinity for hybrid workloads * fix space issue * wip * revert vscode setting file * remove per container logs in ci (#488) * updates for ciprod01112021 release (#489) * new yaml files (#491) * Use cloud-specific instrumentation keys (#494) If APPLICATIONINSIGHTS_AUTH_URL is set/non-empty then the agent will now grab a custom IKey from a URL stored in APPLICATIONINSIGHTS_AUTH_URL * upgrade apt to latest version (#492) * upgrade apt to latest version * fix pr feedback * Gangams/add support for extension msi for arc k8s cluster (#495) * wip * add env var for the arc k8s extension name * chart update * extension msi updates * fix bug * revert chart and image to prod version * minor text changes * image tag to prod * wip * wip * wip * wip * final updates * fix whitespaces * simplify crd yaml * Gangams/arm template arc k8s extension (#496) * arm templates for arc k8s extension * update to use official extension type name * update * add identity property * add proxyendpointurl parameter * add default values * Gangams/aks monitoring via policy (#497) * enable monitoring through policy * wip * handle tags * wip * add alias * wip * working * updates * working * with deployment name * doc updates * doc updates * fix typo in the docs * revert to use operatingSystem from osImage for node os telemety (#498) * Container log v2 schema changes (#499) * make pod name in mdsd definition as str for consistency. msgp has no type checking, as it has type metadata in it the message itself. 
* Add priority class to the daemonsets (#500) * Add priority class to the daemonsets Add a priority class for omsagent and have the daemonsets use this to be sure to schedule the pods. Daemonset pods are constrained in scheduling to run on specific nodes. This is done by the daemonset controller. When a node shows up it will create a pod with a strong affinity to that node. When a node goes away, it will delete the pod with the node affinity to that node. Kubernetes pod scheduling does not know it is a daemonset but it does know it is tied to a specific node. With default scheduling, it is possible for the pods to be "frozen out" of a node because the node already is full. This can happen because "normal" pods may already exist and are looking for a node to get scheduled on when a node is added to the cluster. The daemonset controller will only first create the pod for the node at around the same time. The kubernetes scheduler is running async from all of this and thus there can be a race as to who gets scheduled on the node. The pod priority class (and thus the pod priority) is a way to indicate that the pod has a higher scheduling priority than a default pod. By default, all pods are at priority 0. Higher numbers are higher priority. Setting the priority to something greater than zero will allow the omsagent daemonsets to win a race against "normal" pods for scheduled resources on a node - and will also allow for graceful eviction in the case the node is too full. Without this, omsagent can be left out of node in clusters that are very busy, especially in dynamic scaling situations. I did not test the windows pod as we have no windows clusters. 
* CR feedback * fix node metric issue (#502) * Bug fixes for Feb release (#504) * bug fix for mdm metrics with no limits * fix exception bug * Gangams/feb 2021 agent bug fix (#505) * fix npe in getKubeServiceRecords * use image fields from spec * fix typo * cover all cases * handle scenario only digest specified * changes for release -ciprod02232021 (#506) * Gangams/e2e test framework (#503) * add agent e2e fw and tests * doc and script updates * add validation script * doc updates * yaml updates * fix typo * doc updates * more doc updates * add ISTEST for helm chart to use arc conf * refactor test code * fix pr feedback * fix pr feedback * fix pr feedback * fix pr feedback * scrape new kubelet pod count metric name (#508) * Adding explicit json output to az commands as the script fails if az is configured with Table output #409 (#513) * Gangams/arc proxy contract and token renewal updates (#511) * fix issue with crd status updates * handle renewal token delays * add proxy contract * updates for proxy cert for linux * remove proxycert related changes * fix whitespace issue * fix whitespace issue * remove proxy in arm template * doc updates for microsoft charts repo release (#512) * doc updates for microsoft charts repo release * wip * Update enable-monitoring.sh (#514) Line 314 and 343 seems to have trailing spaces for some subscriptions which is exiting the script even for valid scenarios Co-authored-by: Ganga Mahesh Siddem * Prometheus scraping from sidecar and OSM changes (#515) * add liveness timeout for exec (#518) * chart and other updates (#519) * Saaror osmdoc (#523) * Create ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Add files via upload * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * Update ReadMe.md * telemetry bug fix (#527) * Fix conflicting logrotate settings (#526) The node and the omsagent container both have a cron.daily file to rotate certain logs daily. 
These settings are the same for some files in /var/log (mounted from the node with read/write access), causing the rotation to fail when both try to rotate at the same time. So then the /var/log/*.1 file is written to forever. Since these files are always written to and never rotated, it causes high memory usage on the node after a while. This fix removes the container logrotate settings for /var/log, which the container does not write to. * bug fix (#528) * Gangams/arc ev2 deployment (#522) * ev2 deployment for arc k8s extension * fix charts path issue * rename scripts tar * add notifications * fix line endings * fix line endings * update with prod repo * fix file endings * added liveness and telemetry for telegraf (#517) * added liveness and telemetry for telegraf * code transfer * removed windows liveness probe * done * Windows metric fix (#530) * changes * about to remove container fix * moved caching code to existing loop * removed un-necessary changes * removed a few more un-necessary changes * added windows node check * fixed a bug * everything works confirmed * OSM doc update (#533) * Adding MDM metrics for threshold violation (#531) * Rashmi/april agent 2021 (#538) * add Read_from_Head config for all fluentbit tail plugins (#539) See the commit message of: fluent/fluent-bit@70e33fa for details explaining the fluentbit change and what Read_from_Head does when set to true. 
* fix programdata mount issue on containerd win nodes (#542) * Update sidecar mem limits (#541) * David/release 4 22 2021 (#544) * updating image tag and agent version * updated liveness probe * updated release notes again * fixed date in version file * 1m, 1m, 1s by default (#543) * 1m, 1m, 1s by default * setting default through a different method * David/aad stage 1 release (#556) * update to latest omsagent, add eastus2 to mdsd regions * copied oneagent bits to a CI repository release * mdsd inmem mode * yaml for cl scale test * yaml for cl scale test * reverting dockerProviderVersion version to 15.0.0 * prepping for release (updated image version, dockerProviderVersion, and release notes * container log scaletest yamls * forgot to update image version in chart * fixing windows tag in dockerfile, changing release notes wording * missed windows tag in one more place * forgot to change the windows dockerProviderVersion back Co-authored-by: Ganga Mahesh Siddem * Update ReleaseNotes.md (#558) fix imagetag in the release notes * Add wait time for telegraf and also force mdm egress to use tls 1.2 (#560) * Add wait time for telegraf and also force mdm egress to use tls 1.2 * add wait for all telegraf dependencies across all containers (ds & rs) * remove ssl change so we dont include as part of the other fix until we test with att nodes. 
* partially disabled telegraf liveness probe check, we'll still have telemetry but the probe won't fail if telegraf isn't running (#561) * changes for 05202021 release * fixed typos Co-authored-by: Ganga Mahesh Siddem Co-authored-by: Vishwanath Co-authored-by: rashmichandrashekar Co-authored-by: bragi92 Co-authored-by: saaror <31900410+saaror@users.noreply.github.com> Co-authored-by: Grace Wehner Co-authored-by: deagraw Co-authored-by: Michael Sinz <36865706+Michael-Sinz@users.noreply.github.com> Co-authored-by: Nicolas Yuen Co-authored-by: seenu433 --- ReleaseNotes.md | 8 ++- .../linux/installer/scripts/livenessprobe.sh | 4 +- build/version | 2 +- kubernetes/linux/Dockerfile | 2 +- kubernetes/linux/main.sh | 52 +++++++++++++++++++ kubernetes/omsagent.yaml | 10 ++-- 6 files changed, 68 insertions(+), 10 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index b4c0d6ba4..d7d6de6af 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,9 +11,15 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 05/20/2021 - +##### Version microsoft/oms:ciprod05202021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021 (linux) +##### No Windows changes with this release, win-ciprod04222021 still current. +##### Code change log +- Telegraf now waits 30 seconds on startup for network connections to complete (Linux only) +- Change adding telegraf to the liveness probe reverted (Linux only) ### 05/12/2021 - -##### Version microsoft/oms:ciprod00512021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021 (linux) +##### Version microsoft/oms:ciprod05122021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021 (linux) ##### No Windows changes with this release, win-ciprod04222021 still current. 
##### Code change log - Upgrading oneagent to version 1.8 (only for Linux) diff --git a/build/linux/installer/scripts/livenessprobe.sh b/build/linux/installer/scripts/livenessprobe.sh index 198b4e87f..5e1261e7e 100644 --- a/build/linux/installer/scripts/livenessprobe.sh +++ b/build/linux/installer/scripts/livenessprobe.sh @@ -30,9 +30,9 @@ fi (ps -ef | grep telegraf | grep -v "grep") if [ $? -ne 0 ] then - echo "Telegraf is not running" > /dev/termination-log + # echo "Telegraf is not running" > /dev/termination-log echo "Telegraf is not running (controller: ${CONTROLLER_TYPE}, container type: ${CONTAINER_TYPE})" > /dev/write-to-traces # this file is tailed and sent to traces - exit 1 + # exit 1 fi if [ -s "inotifyoutput.txt" ] diff --git a/build/version b/build/version index 81bb808f5..d70d1f9bc 100644 --- a/build/version +++ b/build/version @@ -3,7 +3,7 @@ # Build Version Information CONTAINER_BUILDVERSION_MAJOR=15 -CONTAINER_BUILDVERSION_MINOR=1 +CONTAINER_BUILDVERSION_MINOR=2 CONTAINER_BUILDVERSION_PATCH=0 CONTAINER_BUILDVERSION_BUILDNR=0 CONTAINER_BUILDVERSION_DATE=20210512 diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 822e52bc8..3ad3cd315 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod05122021 +ARG IMAGE_TAG=ciprod05202021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/linux/main.sh b/kubernetes/linux/main.sh index f03318ad1..c7d939034 100644 --- a/kubernetes/linux/main.sh +++ b/kubernetes/linux/main.sh @@ -1,5 +1,43 @@ #!/bin/bash +waitforlisteneronTCPport() { + local sleepdurationsecs=1 + local totalsleptsecs=0 + local port=$1 + local waittimesecs=$2 + local numeric='^[0-9]+$' + local varlistener="" + + if [ -z "$1" ] || [ 
-z "$2" ]; then + echo "${FUNCNAME[0]} called with incorrect arguments<$1 , $2>. Required arguments <#port, #wait-time-in-seconds>" + return -1 + else + + if [[ $port =~ $numeric ]] && [[ $waittimesecs =~ $numeric ]]; then + #local varlistener=$(netstat -lnt | awk '$6 == "LISTEN" && $4 ~ ":25228$"') + while true + do + if [ $totalsleptsecs -gt $waittimesecs ]; then + echo "${FUNCNAME[0]} giving up waiting for listener on port:$port after $totalsleptsecs secs" + return 1 + fi + varlistener=$(netstat -lnt | awk '$6 == "LISTEN" && $4 ~ ":'"$port"'$"') + if [ -z "$varlistener" ]; then + #echo "${FUNCNAME[0]} waiting for $sleepdurationsecs more sec for listener on port:$port ..." + sleep $sleepdurationsecs + totalsleptsecs=$(($totalsleptsecs+1)) + else + echo "${FUNCNAME[0]} found listener on port:$port in $totalsleptsecs secs" + return 0 + fi + done + else + echo "${FUNCNAME[0]} called with non-numeric arguments<$1 , $2>. Required arguments <#port, #wait-time-in-seconds>" + return -1 + fi + fi +} + if [ -e "/etc/config/kube.conf" ]; then cat /etc/config/kube.conf > /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf elif [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then @@ -689,6 +727,20 @@ echo "export HOST_ETC=/hostfs/etc" >> ~/.bashrc export HOST_VAR=/hostfs/var echo "export HOST_VAR=/hostfs/var" >> ~/.bashrc +if [ ! -e "/etc/config/kube.conf" ]; then + if [ "${CONTAINER_TYPE}" == "PrometheusSidecar" ]; then + echo "checking for listener on tcp #25229 and waiting for 30 secs if not.." + waitforlisteneronTCPport 25229 30 + else + echo "checking for listener on tcp #25226 and waiting for 30 secs if not.." + waitforlisteneronTCPport 25226 30 + echo "checking for listener on tcp #25228 and waiting for 30 secs if not.." + waitforlisteneronTCPport 25228 30 + fi +else + echo "checking for listener on tcp #25226 and waiting for 30 secs if not.." 
+ waitforlisteneronTCPport 25226 30 +fi #start telegraf /opt/telegraf --config $telegrafConfFile & diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index bf94490ba..6ff02c941 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -358,7 +358,7 @@ spec: tier: node annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.1.0-0" + dockerProviderVersion: "15.2.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" imagePullPolicy: IfNotPresent resources: limits: @@ -446,7 +446,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" imagePullPolicy: IfNotPresent resources: limits: @@ -583,13 +583,13 @@ spec: rsName: "omsagent-rs" annotations: agentVersion: "1.10.0.1" - dockerProviderVersion: "15.1.0-0" + dockerProviderVersion: "15.2.0-0" schema-versions: "v1" spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05122021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" imagePullPolicy: IfNotPresent resources: limits: From 24c88e449b87c5a62c4ddd30fad45fb6e46d2fc3 Mon Sep 17 00:00:00 2001 From: vishwanath Date: Wed, 23 Jun 2021 20:38:37 -0700 Subject: [PATCH 16/18] fix append --- source/plugins/ruby/in_kube_nodes.rb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/plugins/ruby/in_kube_nodes.rb b/source/plugins/ruby/in_kube_nodes.rb index 99e804302..4993edd7b 100644 --- a/source/plugins/ruby/in_kube_nodes.rb +++ b/source/plugins/ruby/in_kube_nodes.rb @@ -563,7 +563,7 
@@ def clean_cache() # list is used so that we aren't modifying a hash while iterating through it. @cacheHash.each do |key, val| if cacheClearTime - @timeAdded[key] > @@RECORD_TIME_TO_LIVE - nodes_to_remove.append(key) + nodes_to_remove.push(key) end end From ff3d65f4d2bb94cdb998b6056c72914ab04d8f9b Mon Sep 17 00:00:00 2001 From: David Michelman Date: Mon, 28 Jun 2021 09:45:41 -0700 Subject: [PATCH 17/18] pre-release bookkeeping (image tag, ect) --- ReleaseNotes.md | 6 ++++++ charts/azuremonitor-containers/values.yaml | 2 +- kubernetes/linux/Dockerfile | 2 +- kubernetes/omsagent.yaml | 6 +++--- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index d7d6de6af..b7ce97da4 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,12 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 05/20/2021 hotfix - +##### Version microsoft/oms:ciprod05202021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021-hotfix (linux) +##### No Windows changes with this release, win-ciprod04222021 still current. +##### Code change log +- Fixing a Ruby version-dependent bug ([].append() -> [].push()) + ### 05/20/2021 - ##### Version microsoft/oms:ciprod05202021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021 (linux) ##### No Windows changes with this release, win-ciprod04222021 still current. 
diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 9dd5317a4..2691e9950 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -21,7 +21,7 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod04222021" + tag: "ciprod05202021-hotfix" tagWindows: "win-ciprod04222021" pullPolicy: IfNotPresent dockerProviderVersion: "15.0.0-0" diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index 3ad3cd315..253612556 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod05202021 +ARG IMAGE_TAG=ciprod05202021-hotfix ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 6ff02c941..59119fd70 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -368,7 +368,7 @@ spec: value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021-hotfix" imagePullPolicy: IfNotPresent resources: limits: @@ -446,7 +446,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021-hotfix" imagePullPolicy: IfNotPresent resources: limits: @@ -589,7 +589,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021" + image: 
"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021-hotfix" imagePullPolicy: IfNotPresent resources: limits: From c604fc703baa828226153334afcf6efdb98cef1b Mon Sep 17 00:00:00 2001 From: David Michelman Date: Mon, 28 Jun 2021 10:44:32 -0700 Subject: [PATCH 18/18] made the change note more descriptive --- ReleaseNotes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ReleaseNotes.md b/ReleaseNotes.md index b7ce97da4..78881e2d1 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -15,7 +15,7 @@ Note : The agent version(s) below has dates (ciprod), which indicate t ##### Version microsoft/oms:ciprod05202021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021-hotfix (linux) ##### No Windows changes with this release, win-ciprod04222021 still current. ##### Code change log -- Fixing a Ruby version-dependent bug ([].append() -> [].push()) +- Fixing a bug where the input plugin for NodeInventory dies if any windows nodes are rmeoved from the cluster. ### 05/20/2021 - ##### Version microsoft/oms:ciprod05202021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod05202021 (linux)