diff --git a/.pipelines/push-helm-chart-as-oci-artifact.sh b/.pipelines/push-helm-chart-to-canary-repos.sh similarity index 54% rename from .pipelines/push-helm-chart-as-oci-artifact.sh rename to .pipelines/push-helm-chart-to-canary-repos.sh index 50e16e3d0..db8bff56e 100644 --- a/.pipelines/push-helm-chart-as-oci-artifact.sh +++ b/.pipelines/push-helm-chart-to-canary-repos.sh @@ -1,8 +1,9 @@ #!/bin/bash -# push the helm chart as an OCI artifact to specified ACR # working directory of this script should be charts/azuremonitor-containers -export REPO_PATH="batch1/test/azure-monitor-containers" +# note: this repo registered in arc k8s extension for canary region +export REPO_PATH="public/azuremonitor/containerinsights/canary/preview/azuremonitor-containers" + export HELM_EXPERIMENTAL_OCI=1 for ARGUMENT in "$@" @@ -11,13 +12,13 @@ do VALUE=$(echo $ARGUMENT | cut -f2 -d=) case "$KEY" in - CIARCACR) CIARCACR=$VALUE ;; + CIACR) CIACR=$VALUE ;; CICHARTVERSION) CHARTVERSION=$VALUE ;; *) esac done -echo "CI ARC K8S ACR: ${CIARCACR}" +echo "CI ARC K8S ACR: ${CIACR}" echo "CI HELM CHART VERSION: ${CHARTVERSION}" echo "start: read appid and appsecret" @@ -25,18 +26,19 @@ ACR_APP_ID=$(cat ~/acrappid) ACR_APP_SECRET=$(cat ~/acrappsecret) echo "end: read appid and appsecret" -ACR=${CIARCACR} +ACR=${CIACR} + +echo "login to acr:${ACR} using helm" +helm registry login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET -echo "login to acr:${ACR} using oras" -oras login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET echo "login to acr:${ACR} completed: ${ACR}" echo "start: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" -echo "generate helm package" -helm package . +echo "save the chart locally with acr full path" +helm chart save . 
${ACR}/${REPO_PATH}:${CHARTVERSION} -echo "pushing the helm chart as an OCI artifact" -oras push ${ACR}/${REPO_PATH}:${CHARTVERSION} --manifest-config /dev/null:application/vnd.unknown.config.v1+json ./azuremonitor-containers-${CHARTVERSION}.tgz:application/tar+gzip +echo "pushing the helm chart to ACR: ${ACR}" +helm chart push ${ACR}/${REPO_PATH}:${CHARTVERSION} echo "end: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" diff --git a/.pipelines/push-helm-chart-to-prod-repos.sh b/.pipelines/push-helm-chart-to-prod-repos.sh new file mode 100644 index 000000000..71aa989de --- /dev/null +++ b/.pipelines/push-helm-chart-to-prod-repos.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# working directory of this script should be charts/azuremonitor-containers + +# this repo used without extension public preview release +export PROD_REPO_PATH="public/azuremonitor/containerinsights/preview/azuremonitor-containers" + +# note: this repo registered in arc k8s extension for prod group1 regions. +export EXTENSION_PROD_REPO_PATH="public/azuremonitor/containerinsights/prod1/preview/azuremonitor-containers" + +export HELM_EXPERIMENTAL_OCI=1 + +for ARGUMENT in "$@" +do + KEY=$(echo $ARGUMENT | cut -f1 -d=) + VALUE=$(echo $ARGUMENT | cut -f2 -d=) + + case "$KEY" in + CIACR) CIACR=$VALUE ;; + CICHARTVERSION) CHARTVERSION=$VALUE ;; + *) + esac +done + +echo "CI ARC K8S ACR: ${CIACR}" +echo "CI HELM CHART VERSION: ${CHARTVERSION}" + +echo "start: read appid and appsecret" +ACR_APP_ID=$(cat ~/acrappid) +ACR_APP_SECRET=$(cat ~/acrappsecret) +echo "end: read appid and appsecret" + +ACR=${CIACR} + +echo "login to acr:${ACR} using helm" +helm registry login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET + +echo "login to acr:${ACR} completed: ${ACR}" + +echo "start: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" + +echo "save the chart locally with acr full path: ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION}" +helm chart save . 
${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION} + +echo "save the chart locally with acr full path: ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION}" +helm chart save . ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION} + +echo "pushing the helm chart to ACR: ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION}" +helm chart push ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION} + +echo "pushing the helm chart to ACR: ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION}" +helm chart push ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION} + +echo "end: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}" diff --git a/ReleaseProcess.md b/ReleaseProcess.md index 19802e22c..2a3e6001a 100644 --- a/ReleaseProcess.md +++ b/ReleaseProcess.md @@ -45,7 +45,10 @@ Make PR against [AKS-Engine](https://github.com/Azure/aks-engine). Refer PR http ## ARO v4, On-prem K8s, Azure Arc K8s and OpenShift v4 clusters -Make PR against [HELM-charts](https://github.com/helm/charts) with Azure Monitor for containers chart update. +Make sure azuremonitor-containers chart yamls updates with all changes going with the release and also make sure to bump the chart version, imagetag and docker provider version etc. Similar to agent container image, build pipeline automatically push the chart to container insights prod acr for canary and prod repos accordingly. +Both the agent and helm chart will be replicated to `mcr.microsoft.com`. + +The way, customers will be onboard the monitoring to these clusters using onboarding scripts under `onboarding\managed` directory so please bump chart version for prod release. Once we move to Arc K8s Monitoring extension Public preview, these will be taken care so at that point of time no manual changes like this required. # 4. 
Monitor agent roll-out status diff --git a/charts/azuremonitor-containers/Chart.yaml b/charts/azuremonitor-containers/Chart.yaml index 8976b5561..1d3fed86f 100644 --- a/charts/azuremonitor-containers/Chart.yaml +++ b/charts/azuremonitor-containers/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v1 appVersion: 7.0.0-1 description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes name: azuremonitor-containers -version: 2.7.4 +version: 2.7.6 kubeVersion: "^1.10.0-0" keywords: - monitoring diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml index 72b09f6c1..e65f9a98d 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset-windows.yaml @@ -24,6 +24,8 @@ spec: agentVersion: {{ .Values.omsagent.image.tagWindows }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" + checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} + checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} spec: {{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }} nodeSelector: diff --git a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml index 7514247a0..438294ce5 100644 --- a/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-daemonset.yaml @@ -24,6 +24,9 @@ spec: agentVersion: {{ .Values.omsagent.image.tag }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" + checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . 
| sha256sum }} + checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} + checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }} spec: {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent diff --git a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml index 7d7ac7040..8609d25c9 100644 --- a/charts/azuremonitor-containers/templates/omsagent-deployment.yaml +++ b/charts/azuremonitor-containers/templates/omsagent-deployment.yaml @@ -25,6 +25,9 @@ spec: agentVersion: {{ .Values.omsagent.image.tag }} dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }} schema-versions: "v1" + checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }} + checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }} + checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }} spec: {{- if .Values.omsagent.rbac }} serviceAccountName: omsagent diff --git a/charts/azuremonitor-containers/values.yaml b/charts/azuremonitor-containers/values.yaml index 1804d1197..2711cb372 100644 --- a/charts/azuremonitor-containers/values.yaml +++ b/charts/azuremonitor-containers/values.yaml @@ -12,10 +12,10 @@ Azure: omsagent: image: repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod" - tag: "ciprod09162020" - tagWindows: "win-ciprod09162020" + tag: "ciprod09252020" + tagWindows: "win-ciprod09252020" pullPolicy: IfNotPresent - dockerProviderVersion: "10.0.0-5" + dockerProviderVersion: "10.0.0-6" agentVersion: "1.10.0.1" ## To get your workspace id and key do the following ## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux. 
diff --git a/scripts/onboarding/managed/disable-monitoring.ps1 b/scripts/onboarding/managed/disable-monitoring.ps1 index 8945f90b6..1c011bfff 100644 --- a/scripts/onboarding/managed/disable-monitoring.ps1 +++ b/scripts/onboarding/managed/disable-monitoring.ps1 @@ -1,12 +1,12 @@ <# .DESCRIPTION - Disables Azure Monitor for containers to monitoring enabled Azure Managed K8s cluster such as Azure Arc K8s, ARO v4 and AKS etc. + Disables Azure Monitor for containers to monitoring enabled Azure Managed K8s cluster such as Azure Arc enabled Kubernetes, ARO v4 and AKS etc. 1. Deletes the existing Azure Monitor for containers helm release 2. Deletes logAnalyticsWorkspaceResourceId tag on the provided Managed cluster .PARAMETER clusterResourceId - Id of the Azure Managed Cluster such as Azure ARC K8s, ARO v4 etc. + Id of the Azure Managed Cluster such as Azure Arc enabled Kubernetes, ARO v4 etc. .PARAMETER servicePrincipalClientId client Id of the service principal which will be used for the azure login .PARAMETER servicePrincipalClientSecret @@ -18,7 +18,7 @@ Pre-requisites: - Azure Managed cluster Resource Id - - Contributor role permission on the Subscription of the Azure Arc Cluster + - Contributor role permission on the Subscription of the Azure Arc enabled Kubernetes Cluster - Helm v3.0.0 or higher https://github.com/helm/helm/releases - kube-context of the K8s cluster Note: 1. Please make sure you have all the pre-requisistes before running this script. 
@@ -298,7 +298,7 @@ if ($isArcK8sCluster -eq $true) { # validate identity $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() if ($clusterIdentity.Contains("systemassigned") -eq $false) { - Write-Host("Identity of Azure Arc K8s cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red + Write-Host("Identity of Azure Arc enabled Kubernetes cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red exit } } @@ -354,7 +354,3 @@ catch { } Write-Host("Successfully disabled Azure Monitor for containers for cluster: $clusteResourceId") -ForegroundColor Green - - - - diff --git a/scripts/onboarding/managed/disable-monitoring.sh b/scripts/onboarding/managed/disable-monitoring.sh index f20bd7d33..c11426f30 100644 --- a/scripts/onboarding/managed/disable-monitoring.sh +++ b/scripts/onboarding/managed/disable-monitoring.sh @@ -26,10 +26,10 @@ set -o pipefail # default release name used during onboarding releaseName="azmon-containers-release-1" -# resource type for azure arc clusters +# resource type for Azure Arc enabled Kubernetes clusters resourceProvider="Microsoft.Kubernetes/connectedClusters" -# resource provider for azure arc connected cluster +# resource provider for Azure Arc enabled Kubernetes cluster arcK8sResourceProvider="Microsoft.Kubernetes/connectedClusters" # resource provider for azure redhat openshift v4 cluster aroV4ResourceProvider="Microsoft.RedHatOpenShift/OpenShiftClusters" @@ -125,13 +125,13 @@ remove_monitoring_tags() echo "set the cluster subscription id: ${clusterSubscriptionId}" az account set -s ${clusterSubscriptionId} - # validate cluster identity for ARC k8s cluster + # validate cluster identity for Azure Arc enabled Kubernetes cluster if [ "$isArcK8sCluster" = true ] ; then identitytype=$(az resource show -g ${clusterResourceGroup} -n ${clusterName} --resource-type $resourceProvider --query identity.type) identitytype=$(echo $identitytype | tr "[:upper:]" 
"[:lower:]" | tr -d '"') echo "cluster identity type:" $identitytype if [[ "$identitytype" != "systemassigned" ]]; then - echo "-e only supported cluster identity is systemassigned for Azure ARC K8s cluster type" + echo "-e only supported cluster identity is systemassigned for Azure Arc enabled Kubernetes cluster type" exit 1 fi fi @@ -257,7 +257,7 @@ done # detect the resource provider from the provider name in the cluster resource id if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then - echo "provider cluster resource is of Azure ARC K8s cluster type" + echo "provider cluster resource is of Azure Arc enabled Kubernetes cluster type" isArcK8sCluster=true resourceProvider=$arcK8sResourceProvider elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then diff --git a/scripts/onboarding/managed/enable-monitoring.ps1 b/scripts/onboarding/managed/enable-monitoring.ps1 index 338de6cbc..1e1669400 100644 --- a/scripts/onboarding/managed/enable-monitoring.ps1 +++ b/scripts/onboarding/managed/enable-monitoring.ps1 @@ -1,14 +1,14 @@ <# .DESCRIPTION - Onboards Azure Monitor for containers to Azure Managed Kuberenetes such as Azure Arc K8s, ARO v4 and AKS etc. + Onboards Azure Monitor for containers to Azure Managed Kuberenetes such as Azure Arc enabled Kubernetes, ARO v4 and AKS etc. 1. Creates the Default Azure log analytics workspace if doesn't exist one in specified subscription 2. Adds the ContainerInsights solution to the Azure log analytics workspace 3. Adds the workspaceResourceId tag or enable addon (if the cluster is AKS) on the provided Managed cluster resource id 4. Installs Azure Monitor for containers HELM chart to the K8s cluster in provided via --kube-context .PARAMETER clusterResourceId - Id of the Azure Managed Cluster such as Azure ARC K8s, ARO v4 etc. + Id of the Azure Managed Cluster such as Azure Arc enabled Kubernetes, ARO v4 etc. 
.PARAMETER servicePrincipalClientId Client Id of the service principal which will be used for the azure login .PARAMETER servicePrincipalClientSecret @@ -22,10 +22,6 @@ .PARAMETER proxyEndpoint (optional) Provide Proxy endpoint if you have K8s cluster behind the proxy and would like to route Azure Monitor for containers outbound traffic via proxy. Format of the proxy endpoint should be http(s://:@: - .PARAMETER helmRepoName (optional) - helm repo name. should be used only for the private preview features - .PARAMETER helmRepoUrl (optional) - helm repo url. should be used only for the private preview features Pre-requisites: - Azure Managed cluster Resource Id @@ -50,30 +46,23 @@ param( [Parameter(mandatory = $false)] [string]$workspaceResourceId, [Parameter(mandatory = $false)] - [string]$proxyEndpoint, - [Parameter(mandatory = $false)] - [string]$helmRepoName, - [Parameter(mandatory = $false)] - [string]$helmRepoUrl + [string]$proxyEndpoint ) -$solutionTemplateUri= "https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/templates/azuremonitor-containerSolution.json" +$solutionTemplateUri = "https://raw.githubusercontent.com/microsoft/Docker-Provider/ci_dev/scripts/onboarding/templates/azuremonitor-containerSolution.json" $helmChartReleaseName = "azmon-containers-release-1" $helmChartName = "azuremonitor-containers" -$helmChartRepoName = "incubator" -$helmChartRepoUrl = "https://kubernetes-charts-incubator.storage.googleapis.com/" + # flags to indicate the cluster types $isArcK8sCluster = $false -$isAksCluster = $false +$isAksCluster = $false $isUsingServicePrincipal = $false -if([string]::IsNullOrEmpty($helmRepoName) -eq $false){ - $helmChartRepoName = $helmRepoName -} - -if([string]::IsNullOrEmpty($helmRepoUrl) -eq $false){ - $helmChartRepoUrl = $helmRepoUrl -} +# released chart version in mcr +$mcr = "mcr.microsoft.com" +$mcrChartVersion = "2.7.6" +$mcrChartRepoPath = "azuremonitor/containerinsights/preview/azuremonitor-containers" 
+$helmLocalRepoName = "." # checks the required Powershell modules exist and if not exists, request the user permission to install $azAccountModule = Get-Module -ListAvailable -Name Az.Accounts @@ -200,7 +189,7 @@ if (($null -eq $azAccountModule) -or ($null -eq $azResourcesModule) -or ($null - } if ([string]::IsNullOrEmpty($clusterResourceId)) { - Write-Host("Specified Azure Arc ClusterResourceId should not be NULL or empty") -ForegroundColor Red + Write-Host("Specified Azure Arc enabled Kubernetes ClusterResourceId should not be NULL or empty") -ForegroundColor Red exit } @@ -220,30 +209,31 @@ if ($clusterResourceId.StartsWith("/") -eq $false) { $clusterResourceId = "/" + $clusterResourceId } -if ($clusterResourceId.Split("/").Length -ne 9){ - Write-Host("Provided Cluster Resource Id is not in expected format") -ForegroundColor Red +if ($clusterResourceId.Split("/").Length -ne 9) { + Write-Host("Provided Cluster Resource Id is not in expected format") -ForegroundColor Red exit } if (($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedclusters") -ne $true) -and ($clusterResourceId.ToLower().Contains("microsoft.redhatopenshift/openshiftclusters") -ne $true) -and ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -ne $true) - ) { +) { Write-Host("Provided cluster ResourceId is not supported cluster type: $clusterResourceId") -ForegroundColor Red exit } -if(([string]::IsNullOrEmpty($servicePrincipalClientId) -eq $false) -and - ([string]::IsNullOrEmpty($servicePrincipalClientSecret) -eq $false) -and - ([string]::IsNullOrEmpty($tenantId) -eq $false)) { - Write-Host("Using service principal creds for the azure login since these provided.") - $isUsingServicePrincipal = $true +if (([string]::IsNullOrEmpty($servicePrincipalClientId) -eq $false) -and + ([string]::IsNullOrEmpty($servicePrincipalClientSecret) -eq $false) -and + ([string]::IsNullOrEmpty($tenantId) -eq $false)) { + Write-Host("Using service principal creds for 
the azure login since these provided.") + $isUsingServicePrincipal = $true } if ($clusterResourceId.ToLower().Contains("microsoft.kubernetes/connectedclusters") -eq $true) { - $isArcK8sCluster = $true -} elseif ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -eq $true) { - $isAksCluster = $true + $isArcK8sCluster = $true +} +elseif ($clusterResourceId.ToLower().Contains("microsoft.containerservice/managedclusters") -eq $true) { + $isAksCluster = $true } $resourceParts = $clusterResourceId.Split("/") @@ -253,7 +243,7 @@ Write-Host("Cluster SubscriptionId : '" + $clusterSubscriptionId + "' ") -Foregr if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force - $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId, $spSecret Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId } @@ -275,12 +265,13 @@ if ($null -eq $account.Account) { try { if ($isUsingServicePrincipal) { $spSecret = ConvertTo-SecureString -String $servicePrincipalClientSecret -AsPlainText -Force - $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId,$spSecret + $spCreds = New-Object -TypeName "System.Management.Automation.PSCredential" -ArgumentList $servicePrincipalClientId, $spSecret Connect-AzAccount -ServicePrincipal -Credential $spCreds -Tenant $tenantId -Subscription $clusterSubscriptionId - } else { - Write-Host("Please login...") - Connect-AzAccount -subscriptionid $clusterSubscriptionId - } + } + else { + Write-Host("Please login...") + Connect-AzAccount -subscriptionid $clusterSubscriptionId + } } catch { Write-Host("") @@ -322,12 +313,12 @@ if ($null -eq $clusterResource) { 
$clusterRegion = $clusterResource.Location.ToLower() if ($isArcK8sCluster -eq $true) { - # validate identity - $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() - if ($clusterIdentity.contains("systemassigned") -eq $false) { - Write-Host("Identity of Azure Arc K8s cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red - exit - } + # validate identity + $clusterIdentity = $clusterResource.identity.type.ToString().ToLower() + if ($clusterIdentity.contains("systemassigned") -eq $false) { + Write-Host("Identity of Azure Arc enabled Kubernetes cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red + exit + } } if ([string]::IsNullOrEmpty($workspaceResourceId)) { @@ -514,7 +505,8 @@ if ($account.Subscription.Id -eq $clusterSubscriptionId) { if ($isAksCluster -eq $true) { Write-Host ("Enabling AKS Monitoring Addon ..") # TBD -} else { +} +else { Write-Host("Attaching workspaceResourceId tag on the cluster ResourceId") $clusterResource.Tags["logAnalyticsWorkspaceResourceId"] = $WorkspaceInformation.ResourceId Set-AzResource -Tag $clusterResource.Tags -ResourceId $clusterResource.ResourceId -Force @@ -526,20 +518,30 @@ Write-Host "Helm version" : $helmVersion Write-Host("Installing or upgrading if exists, Azure Monitor for containers HELM chart ...") try { - Write-Host("Adding $helmChartRepoName repo to helm: $helmChartRepoUrl") - helm repo add $helmChartRepoName $helmChartRepoUrl - Write-Host("updating helm repo to get latest version of charts") - helm repo update + Write-Host("pull the chart from mcr.microsoft.com") + [System.Environment]::SetEnvironmentVariable("HELM_EXPERIMENTAL_OCI", 1, "Process") + + Write-Host("pull the chart from mcr.microsoft.com") + helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} + + Write-Host("export the chart from local cache to current directory") + helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination . 
+ + $helmChartRepoPath = "${helmLocalRepoName}" + "/" + "${helmChartName}" + + Write-Host("helmChartRepoPath is : ${helmChartRepoPath}") + $helmParameters = "omsagent.secret.wsid=$workspaceGUID,omsagent.secret.key=$workspacePrimarySharedKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion" - if([string]::IsNullOrEmpty($proxyEndpoint) -eq $false) { + if ([string]::IsNullOrEmpty($proxyEndpoint) -eq $false) { Write-Host("using proxy endpoint since its provided") $helmParameters = $helmParameters + ",omsagent.proxy=$proxyEndpoint" } if ([string]::IsNullOrEmpty($kubeContext)) { - helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoName/$helmChartName - } else { - Write-Host("using provided kube-context: $kubeContext") - helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoName/$helmChartName --kube-context $kubeContext + helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoPath + } + else { + Write-Host("using provided kube-context: $kubeContext") + helm upgrade --install $helmChartReleaseName --set $helmParameters $helmChartRepoPath --kube-context $kubeContext } } catch { @@ -548,7 +550,3 @@ catch { Write-Host("Successfully enabled Azure Monitor for containers for cluster: $clusterResourceId") -ForegroundColor Green Write-Host("Proceed to https://aka.ms/azmon-containers to view your newly onboarded Azure Managed cluster") -ForegroundColor Green - - - - diff --git a/scripts/onboarding/managed/enable-monitoring.sh b/scripts/onboarding/managed/enable-monitoring.sh index 226fd978b..ce62a581a 100644 --- a/scripts/onboarding/managed/enable-monitoring.sh +++ b/scripts/onboarding/managed/enable-monitoring.sh @@ -41,9 +41,11 @@ set -o pipefail # default to public cloud since only supported cloud is azure public clod defaultAzureCloud="AzureCloud" -# helm repo details -helmRepoName="incubator" 
-helmRepoUrl="https://kubernetes-charts-incubator.storage.googleapis.com/" +# released chart version in mcr +mcrChartVersion="2.7.6" +mcr="mcr.microsoft.com" +mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" +helmLocalRepoName="." helmChartName="azuremonitor-containers" # default release name used during onboarding @@ -58,19 +60,18 @@ aroV4ResourceProvider="Microsoft.RedHatOpenShift/OpenShiftClusters" # resource provider for aks cluster aksResourceProvider="Microsoft.ContainerService/managedClusters" -# default of resourceProvider is arc k8s and this will get updated based on the provider cluster resource +# default of resourceProvider is Azure Arc enabled Kubernetes and this will get updated based on the provider cluster resource resourceProvider="Microsoft.Kubernetes/connectedClusters" - # resource type for azure log analytics workspace workspaceResourceProvider="Microsoft.OperationalInsights/workspaces" # openshift project name for aro v4 cluster openshiftProjectName="azure-monitor-for-containers" -# arc k8s cluster resource +# AROv4 cluster resource isAroV4Cluster=false -# arc k8s cluster resource +# Azure Arc enabled Kubernetes cluster resource isArcK8sCluster=false # aks cluster resource @@ -103,28 +104,25 @@ servicePrincipalClientSecret="" servicePrincipalTenantId="" isUsingServicePrincipal=false -usage() -{ - local basename=`basename $0` - echo - echo "Enable Azure Monitor for containers:" - echo "$basename --resource-id [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context ] [--workspace-id ] [--proxy ]" +usage() { + local basename=$(basename $0) + echo + echo "Enable Azure Monitor for containers:" + echo "$basename --resource-id [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context ] [--workspace-id ] [--proxy ]" } -parse_args() -{ +parse_args() { - if [ $# -le 1 ] - then + if [ $# -le 1 ]; then usage exit 1 - fi + fi -# Transform long options to short ones -for arg in "$@"; do - shift - case "$arg" 
in - "--resource-id") set -- "$@" "-r" ;; + # Transform long options to short ones + for arg in "$@"; do + shift + case "$arg" in + "--resource-id") set -- "$@" "-r" ;; "--kube-context") set -- "$@" "-k" ;; "--workspace-id") set -- "$@" "-w" ;; "--proxy") set -- "$@" "-p" ;; @@ -134,130 +132,128 @@ for arg in "$@"; do "--helm-repo-name") set -- "$@" "-n" ;; "--helm-repo-url") set -- "$@" "-u" ;; "--container-log-volume") set -- "$@" "-v" ;; - "--"*) usage ;; - *) set -- "$@" "$arg" - esac -done + "--"*) usage ;; + *) set -- "$@" "$arg" ;; + esac + done -local OPTIND opt + local OPTIND opt -while getopts 'hk:r:w:p:c:s:t:n:u:v:' opt; do + while getopts 'hk:r:w:p:c:s:t:n:u:v:' opt; do case "$opt" in - h) + h) + usage + ;; + + k) + kubeconfigContext="$OPTARG" + echo "name of kube-context is $OPTARG" + ;; + + r) + clusterResourceId="$OPTARG" + echo "clusterResourceId is $OPTARG" + ;; + + w) + workspaceResourceId="$OPTARG" + echo "workspaceResourceId is $OPTARG" + ;; + + p) + proxyEndpoint="$OPTARG" + echo "proxyEndpoint is $OPTARG" + ;; + + c) + servicePrincipalClientId="$OPTARG" + echo "servicePrincipalClientId is $OPTARG" + ;; + + s) + servicePrincipalClientSecret="$OPTARG" + echo "clientSecret is *****" + ;; + + t) + servicePrincipalTenantId="$OPTARG" + echo "service principal tenantId is $OPTARG" + ;; + + n) + helmRepoName="$OPTARG" + echo "helm repo name is $OPTARG" + ;; + + u) + helmRepoUrl="$OPTARG" + echo "helm repo url is $OPTARG" + ;; + + v) + containerLogVolume="$OPTARG" + echo "container log volume is $OPTARG" + ;; + + ?) 
usage - ;; - - k) - kubeconfigContext="$OPTARG" - echo "name of kube-context is $OPTARG" - ;; - - r) - clusterResourceId="$OPTARG" - echo "clusterResourceId is $OPTARG" - ;; - - w) - workspaceResourceId="$OPTARG" - echo "workspaceResourceId is $OPTARG" - ;; - - p) - proxyEndpoint="$OPTARG" - echo "proxyEndpoint is $OPTARG" - ;; - - c) - servicePrincipalClientId="$OPTARG" - echo "servicePrincipalClientId is $OPTARG" - ;; - - s) - servicePrincipalClientSecret="$OPTARG" - echo "clientSecret is *****" - ;; - - t) - servicePrincipalTenantId="$OPTARG" - echo "service principal tenantId is $OPTARG" - ;; - - n) - helmRepoName="$OPTARG" - echo "helm repo name is $OPTARG" - ;; - - u) - helmRepoUrl="$OPTARG" - echo "helm repo url is $OPTARG" - ;; - - v) - containerLogVolume="$OPTARG" - echo "container log volume is $OPTARG" - ;; - - ?) - usage - exit 1 - ;; + exit 1 + ;; esac done - shift "$(($OPTIND -1))" + shift "$(($OPTIND - 1))" + local subscriptionId="$(echo ${clusterResourceId} | cut -d'/' -f3)" + local resourceGroup="$(echo ${clusterResourceId} | cut -d'/' -f5)" - local subscriptionId="$(echo ${clusterResourceId} | cut -d'/' -f3)" - local resourceGroup="$(echo ${clusterResourceId} | cut -d'/' -f5)" + # get resource parts and join back to get the provider name + local providerNameResourcePart1="$(echo ${clusterResourceId} | cut -d'/' -f7)" + local providerNameResourcePart2="$(echo ${clusterResourceId} | cut -d'/' -f8)" + local providerName="$(echo ${providerNameResourcePart1}/${providerNameResourcePart2})" - # get resource parts and join back to get the provider name - local providerNameResourcePart1="$(echo ${clusterResourceId} | cut -d'/' -f7)" - local providerNameResourcePart2="$(echo ${clusterResourceId} | cut -d'/' -f8)" - local providerName="$(echo ${providerNameResourcePart1}/${providerNameResourcePart2} )" + local clusterName="$(echo ${clusterResourceId} | cut -d'/' -f9)" - local clusterName="$(echo ${clusterResourceId} | cut -d'/' -f9)" + # convert to lowercase 
for validation + providerName=$(echo $providerName | tr "[:upper:]" "[:lower:]") - # convert to lowercase for validation - providerName=$(echo $providerName | tr "[:upper:]" "[:lower:]") + echo "cluster SubscriptionId:" $subscriptionId + echo "cluster ResourceGroup:" $resourceGroup + echo "cluster ProviderName:" $providerName + echo "cluster Name:" $clusterName - echo "cluster SubscriptionId:" $subscriptionId - echo "cluster ResourceGroup:" $resourceGroup - echo "cluster ProviderName:" $providerName - echo "cluster Name:" $clusterName - - if [ -z "$subscriptionId" -o -z "$resourceGroup" -o -z "$providerName" -o -z "$clusterName" ]; then + if [ -z "$subscriptionId" -o -z "$resourceGroup" -o -z "$providerName" -o -z "$clusterName" ]; then echo "-e invalid cluster resource id. Please try with valid fully qualified resource id of the cluster" exit 1 - fi + fi - if [[ $providerName != microsoft.* ]]; then - echo "-e invalid azure cluster resource id format." - exit 1 - fi + if [[ $providerName != microsoft.* ]]; then + echo "-e invalid azure cluster resource id format." 
+ exit 1 + fi - # detect the resource provider from the provider name in the cluster resource id - # detect the resource provider from the provider name in the cluster resource id - if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then - echo "provider cluster resource is of Azure ARC K8s cluster type" + # detect the resource provider from the provider name in the cluster resource id + if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then + echo "provider cluster resource is of Azure Arc enabled Kubernetes cluster type" isArcK8sCluster=true resourceProvider=$arcK8sResourceProvider - elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then + elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then echo "provider cluster resource is of AROv4 cluster type" resourceProvider=$aroV4ResourceProvider isAroV4Cluster=true - elif [ $providerName = "microsoft.containerservice/managedclusters" ]; then + elif [ $providerName = "microsoft.containerservice/managedclusters" ]; then echo "provider cluster resource is of AKS cluster type" isAksCluster=true resourceProvider=$aksResourceProvider - else - echo "-e unsupported azure managed cluster type" - exit 1 - fi + else + echo "-e unsupported azure managed cluster type" + exit 1 + fi - if [ -z "$kubeconfigContext" ]; then + if [ -z "$kubeconfigContext" ]; then echo "using or getting current kube config context since --kube-context parameter not set " - fi + fi -if [ ! -z "$workspaceResourceId" ]; then + if [ ! -z "$workspaceResourceId" ]; then local workspaceSubscriptionId="$(echo $workspaceResourceId | cut -d'/' -f3)" local workspaceResourceGroup="$(echo $workspaceResourceId | cut -d'/' -f5)" local workspaceProviderName="$(echo $workspaceResourceId | cut -d'/' -f7)" @@ -269,13 +265,13 @@ if [ ! 
-z "$workspaceResourceId" ]; then echo "workspace ProviderName:" $workspaceName echo "workspace Name:" $workspaceName - if [[ $workspaceProviderName != microsoft.operationalinsights* ]]; then - echo "-e invalid azure log analytics resource id format." - exit 1 - fi -fi + if [[ $workspaceProviderName != microsoft.operationalinsights* ]]; then + echo "-e invalid azure log analytics resource id format." + exit 1 + fi + fi -if [ ! -z "$proxyEndpoint" ]; then + if [ ! -z "$proxyEndpoint" ]; then # Validate Proxy Endpoint URL # extract the protocol:// proto="$(echo $proxyEndpoint | grep :// | sed -e's,^\(.*://\).*,\1,g')" @@ -302,23 +298,21 @@ if [ ! -z "$proxyEndpoint" ]; then else echo "successfully validated provided proxy endpoint is valid and in expected format" fi -fi + fi -if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! -z "$servicePrincipalTenantId" ]; then - echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" - isUsingServicePrincipal=true -fi + if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! 
-z "$servicePrincipalTenantId" ]; then + echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" + isUsingServicePrincipal=true + fi } -configure_to_public_cloud() -{ +configure_to_public_cloud() { echo "Set AzureCloud as active cloud for az cli" az cloud set -n $defaultAzureCloud } -validate_cluster_identity() -{ +validate_cluster_identity() { echo "validating cluster identity" local rgName="$(echo ${1})" @@ -329,15 +323,14 @@ validate_cluster_identity() echo "cluster identity type:" $identitytype if [[ "$identitytype" != "systemassigned" ]]; then - echo "-e only supported cluster identity is systemassigned for Azure ARC K8s cluster type" - exit 1 + echo "-e only supported cluster identity is systemassigned for Azure Arc enabled Kubernetes cluster type" + exit 1 fi echo "successfully validated the identity of the cluster" } -create_default_log_analytics_workspace() -{ +create_default_log_analytics_workspace() { # extract subscription from cluster resource id local subscriptionId="$(echo $clusterResourceId | cut -d'/' -f3)" @@ -348,73 +341,71 @@ create_default_log_analytics_workspace() # mapping fors for default Azure Log Analytics workspace declare -A AzureCloudLocationToOmsRegionCodeMap=( - [australiasoutheast]=ASE - [australiaeast]=EAU - [australiacentral]=CAU - [canadacentral]=CCA - [centralindia]=CIN - [centralus]=CUS - [eastasia]=EA - [eastus]=EUS - [eastus2]=EUS2 - [eastus2euap]=EAP - [francecentral]=PAR - [japaneast]=EJP - [koreacentral]=SE - [northeurope]=NEU - [southcentralus]=SCUS - [southeastasia]=SEA - [uksouth]=SUK - [usgovvirginia]=USGV - [westcentralus]=EUS - [westeurope]=WEU - [westus]=WUS - [westus2]=WUS2 + [australiasoutheast]=ASE + [australiaeast]=EAU + [australiacentral]=CAU + [canadacentral]=CCA + [centralindia]=CIN + [centralus]=CUS + [eastasia]=EA + [eastus]=EUS + [eastus2]=EUS2 + [eastus2euap]=EAP + [francecentral]=PAR + [japaneast]=EJP + [koreacentral]=SE + [northeurope]=NEU + 
[southcentralus]=SCUS + [southeastasia]=SEA + [uksouth]=SUK + [usgovvirginia]=USGV + [westcentralus]=EUS + [westeurope]=WEU + [westus]=WUS + [westus2]=WUS2 ) declare -A AzureCloudRegionToOmsRegionMap=( - [australiacentral]=australiacentral - [australiacentral2]=australiacentral - [australiaeast]=australiaeast - [australiasoutheast]=australiasoutheast - [brazilsouth]=southcentralus - [canadacentral]=canadacentral - [canadaeast]=canadacentral - [centralus]=centralus - [centralindia]=centralindia - [eastasia]=eastasia - [eastus]=eastus - [eastus2]=eastus2 - [francecentral]=francecentral - [francesouth]=francecentral - [japaneast]=japaneast - [japanwest]=japaneast - [koreacentral]=koreacentral - [koreasouth]=koreacentral - [northcentralus]=eastus - [northeurope]=northeurope - [southafricanorth]=westeurope - [southafricawest]=westeurope - [southcentralus]=southcentralus - [southeastasia]=southeastasia - [southindia]=centralindia - [uksouth]=uksouth - [ukwest]=uksouth - [westcentralus]=eastus - [westeurope]=westeurope - [westindia]=centralindia - [westus]=westus - [westus2]=westus2 + [australiacentral]=australiacentral + [australiacentral2]=australiacentral + [australiaeast]=australiaeast + [australiasoutheast]=australiasoutheast + [brazilsouth]=southcentralus + [canadacentral]=canadacentral + [canadaeast]=canadacentral + [centralus]=centralus + [centralindia]=centralindia + [eastasia]=eastasia + [eastus]=eastus + [eastus2]=eastus2 + [francecentral]=francecentral + [francesouth]=francecentral + [japaneast]=japaneast + [japanwest]=japaneast + [koreacentral]=koreacentral + [koreasouth]=koreacentral + [northcentralus]=eastus + [northeurope]=northeurope + [southafricanorth]=westeurope + [southafricawest]=westeurope + [southcentralus]=southcentralus + [southeastasia]=southeastasia + [southindia]=centralindia + [uksouth]=uksouth + [ukwest]=uksouth + [westcentralus]=eastus + [westeurope]=westeurope + [westindia]=centralindia + [westus]=westus + [westus2]=westus2 ) - if [ -n 
"${AzureCloudRegionToOmsRegionMap[$clusterRegion]}" ]; - then + if [ -n "${AzureCloudRegionToOmsRegionMap[$clusterRegion]}" ]; then workspaceRegion=${AzureCloudRegionToOmsRegionMap[$clusterRegion]} fi echo "Workspace Region:"$workspaceRegion - if [ -n "${AzureCloudLocationToOmsRegionCodeMap[$workspaceRegion]}" ]; - then + if [ -n "${AzureCloudLocationToOmsRegionCodeMap[$workspaceRegion]}" ]; then workspaceRegionCode=${AzureCloudLocationToOmsRegionCodeMap[$workspaceRegion]} fi echo "Workspace Region Code:"$workspaceRegionCode @@ -423,30 +414,28 @@ create_default_log_analytics_workspace() isRGExists=$(az group exists -g $workspaceResourceGroup) workspaceName="DefaultWorkspace-"$subscriptionId"-"$workspaceRegionCode - if $isRGExists - then echo "using existing default resource group:"$workspaceResourceGroup + if $isRGExists; then + echo "using existing default resource group:"$workspaceResourceGroup else echo "creating resource group: $workspaceResourceGroup in region: $workspaceRegion" az group create -g $workspaceResourceGroup -l $workspaceRegion fi - workspaceList=$(az resource list -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider) - if [ "$workspaceList" = "[]" ]; - then - # create new default workspace since no mapped existing default workspace - echo '{"location":"'"$workspaceRegion"'", "properties":{"sku":{"name": "standalone"}}}' > WorkspaceProps.json - cat WorkspaceProps.json - workspace=$(az resource create -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --is-full-object -p @WorkspaceProps.json) + workspaceList=$(az resource list -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider) + if [ "$workspaceList" = "[]" ]; then + # create new default workspace since no mapped existing default workspace + echo '{"location":"'"$workspaceRegion"'", "properties":{"sku":{"name": "standalone"}}}' >WorkspaceProps.json + cat WorkspaceProps.json + workspace=$(az 
resource create -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --is-full-object -p @WorkspaceProps.json) else echo "using existing default workspace:"$workspaceName fi - workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id) + workspaceResourceId=$(az resource show -g $workspaceResourceGroup -n $workspaceName --resource-type $workspaceResourceProvider --query id) workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') } -add_container_insights_solution() -{ +add_container_insights_solution() { local resourceId="$(echo ${1})" # extract resource group from workspace resource id @@ -456,10 +445,9 @@ add_container_insights_solution() solution=$(az deployment group create -g $resourceGroup --template-uri $solutionTemplateUri --parameters workspaceResourceId=$resourceId --parameters workspaceRegion=$workspaceRegion) } -get_workspace_guid_and_key() -{ +get_workspace_guid_and_key() { # extract resource parts from workspace resource id - local resourceId="$(echo ${1} | tr -d '"' )" + local resourceId="$(echo ${1} | tr -d '"')" local subId="$(echo ${resourceId} | cut -d'/' -f3)" local rgName="$(echo ${resourceId} | cut -d'/' -f5)" local wsName="$(echo ${resourceId} | cut -d'/' -f9)" @@ -474,11 +462,10 @@ get_workspace_guid_and_key() workspaceKey=$(echo $workspaceKey | tr -d '"') } -install_helm_chart() -{ +install_helm_chart() { - # get the config-context for ARO v4 cluster - if [ "$isAroV4Cluster" = true ] ; then + # get the config-context for ARO v4 cluster + if [ "$isAroV4Cluster" = true ]; then echo "getting config-context of ARO v4 cluster " echo "getting admin user creds for aro v4 cluster" adminUserName=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminUsername' -o tsv) @@ -490,83 +477,84 @@ install_helm_chart() oc new-project $openshiftProjectName echo "getting config-context of aro v4 cluster" 
kubeconfigContext=$(oc config current-context) - fi - - if [ -z "$kubeconfigContext" ]; then - echo "installing Azure Monitor for containers HELM chart on to the cluster and using current kube context ..." - else - echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." - fi - - echo "getting the region of the cluster" - clusterRegion=$(az resource show --ids ${clusterResourceId} --query location -o tsv) - echo "cluster region is : ${clusterRegion}" - - echo "adding helm repo:" $helmRepoName - helm repo add $helmRepoName $helmRepoUrl - - echo "updating helm repo to get latest charts" - helm repo update - - if [ ! -z "$proxyEndpoint" ]; then - echo "using proxy endpoint since proxy configuration passed in" - if [ -z "$kubeconfigContext" ]; then - echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName - else - echo "using --kube-context:${kubeconfigContext} since passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName --kube-context ${kubeconfigContext} - fi - else - if [ -z "$kubeconfigContext" ]; then - echo "using current kube-context since --kube-context/-k parameter not passed in" - helm upgrade --install azmon-containers-release-1 --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName - else - echo "using --kube-context:${kubeconfigContext} since passed 
in" - helm upgrade --install azmon-containers-release-1 --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmRepoName/$helmChartName --kube-context ${kubeconfigContext} - fi - fi - - echo "chart installation completed." + fi + + if [ -z "$kubeconfigContext" ]; then + echo "installing Azure Monitor for containers HELM chart on to the cluster and using current kube context ..." + else + echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." + fi + + echo "getting the region of the cluster" + clusterRegion=$(az resource show --ids ${clusterResourceId} --query location -o tsv) + echo "cluster region is : ${clusterRegion}" + + echo "pull the chart version ${mcrChartVersion} from ${mcr}/${mcrChartRepoPath}" + export HELM_EXPERIMENTAL_OCI=1 + helm chart pull $mcr/$mcrChartRepoPath:$mcrChartVersion + + echo "export the chart from local cache to current directory" + helm chart export $mcr/$mcrChartRepoPath:$mcrChartVersion --destination . + + helmChartRepoPath=$helmLocalRepoName/$helmChartName + + echo "helm chart repo path: ${helmChartRepoPath}" + + if [ ! 
-z "$proxyEndpoint" ]; then + echo "using proxy endpoint since proxy configuration passed in" + if [ -z "$kubeconfigContext" ]; then + echo "using current kube-context since --kube-context/-k parameter not passed in" + helm upgrade --install $releaseName --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath + else + echo "using --kube-context:${kubeconfigContext} since passed in" + helm upgrade --install $releaseName --set omsagent.proxy=$proxyEndpoint,omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} + fi + else + if [ -z "$kubeconfigContext" ]; then + echo "using current kube-context since --kube-context/-k parameter not passed in" + helm upgrade --install $releaseName --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath + else + echo "using --kube-context:${kubeconfigContext} since passed in" + helm upgrade --install $releaseName --set omsagent.secret.wsid=$workspaceGuid,omsagent.secret.key=$workspaceKey,omsagent.env.clusterId=$clusterResourceId,omsagent.env.clusterRegion=$clusterRegion $helmChartRepoPath --kube-context ${kubeconfigContext} + fi + fi + + echo "chart installation completed." 
} -login_to_azure() -{ - if [ "$isUsingServicePrincipal" = true ] ; then - echo "login to the azure using provided service principal creds" - az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId +login_to_azure() { + if [ "$isUsingServicePrincipal" = true ]; then + echo "login to the azure using provided service principal creds" + az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId else echo "login to the azure interactively" az login --use-device-code fi } -set_azure_subscription() -{ - local subscriptionId="$(echo ${1})" - echo "setting the subscription id: ${subscriptionId} as current subscription for the azure cli" - az account set -s ${subscriptionId} - echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" +set_azure_subscription() { + local subscriptionId="$(echo ${1})" + echo "setting the subscription id: ${subscriptionId} as current subscription for the azure cli" + az account set -s ${subscriptionId} + echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" } -attach_monitoring_tags() -{ +attach_monitoring_tags() { echo "attach loganalyticsworkspaceResourceId tag on to cluster resource" - status=$(az resource update --set tags.logAnalyticsWorkspaceResourceId=$workspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) + status=$(az resource update --set tags.logAnalyticsWorkspaceResourceId=$workspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) echo "$status" echo "successfully attached logAnalyticsWorkspaceResourceId tag on the cluster resource" } # enables aks monitoring addon for private preview and dont use this for aks prod -enable_aks_monitoring_addon() -{ - echo "getting cluster object" - 
clusterGetResponse=$(az rest --method get --uri $clusterResourceId?api-version=2020-03-01) - export jqquery=".properties.addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID=\"$workspaceResourceId\"" - echo $clusterGetResponse | jq $jqquery > putrequestbody.json - status=$(az rest --method put --uri $clusterResourceId?api-version=2020-03-01 --body @putrequestbody.json --headers Content-Type=application/json) - echo "status after enabling of aks monitoringa addon:$status" +enable_aks_monitoring_addon() { + echo "getting cluster object" + clusterGetResponse=$(az rest --method get --uri $clusterResourceId?api-version=2020-03-01) + export jqquery=".properties.addonProfiles.omsagent.config.logAnalyticsWorkspaceResourceID=\"$workspaceResourceId\"" + echo $clusterGetResponse | jq $jqquery >putrequestbody.json + status=$(az rest --method put --uri $clusterResourceId?api-version=2020-03-01 --body @putrequestbody.json --headers Content-Type=application/json) + echo "status after enabling of aks monitoringa addon:$status" } # parse and validate args @@ -587,9 +575,9 @@ login_to_azure # set the cluster subscription id as active sub for azure cli set_azure_subscription $clusterSubscriptionId -# validate cluster identity if its ARC k8s cluster -if [ "$isArcK8sCluster" = true ] ; then - validate_cluster_identity $clusterResourceGroup $clusterName +# validate cluster identity if its Azure Arc enabled Kubernetes cluster +if [ "$isArcK8sCluster" = true ]; then + validate_cluster_identity $clusterResourceGroup $clusterName fi if [ -z $workspaceResourceId ]; then @@ -598,7 +586,7 @@ if [ -z $workspaceResourceId ]; then else echo "using provided azure log analytics workspace:${workspaceResourceId}" workspaceResourceId=$(echo $workspaceResourceId | tr -d '"') - workspaceSubscriptionId="$(echo ${workspaceResourceId} | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]" )" + workspaceSubscriptionId="$(echo ${workspaceResourceId} | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" 
workspaceResourceGroup="$(echo ${workspaceResourceId} | cut -d'/' -f5)" workspaceName="$(echo ${workspaceResourceId} | cut -d'/' -f9)" @@ -620,13 +608,13 @@ add_container_insights_solution $workspaceResourceId # get workspace guid and key get_workspace_guid_and_key $workspaceResourceId -if [ "$isClusterAndWorkspaceInSameSubscription" = false ] ; then +if [ "$isClusterAndWorkspaceInSameSubscription" = false ]; then echo "switch to cluster subscription id as active subscription for cli: ${clusterSubscriptionId}" set_azure_subscription $clusterSubscriptionId fi # attach monitoring tags on to cluster resource -if [ "$isAksCluster" = true ] ; then +if [ "$isAksCluster" = true ]; then enable_aks_monitoring_addon else attach_monitoring_tags diff --git a/scripts/onboarding/managed/upgrade-monitoring.sh b/scripts/onboarding/managed/upgrade-monitoring.sh new file mode 100644 index 000000000..8a12b2f02 --- /dev/null +++ b/scripts/onboarding/managed/upgrade-monitoring.sh @@ -0,0 +1,314 @@ +#!/bin/bash +# +# Execute this directly in Azure Cloud Shell (https://shell.azure.com) by pasting (SHIFT+INS on Windows, CTRL+V on Mac or Linux) +# the following line (beginning with curl...) at the command prompt and then replacing the args: +# This script upgrades the existing Azure Monitor for containers release on an Azure Arc enabled Kubernetes cluster +# +# 1. Upgrades the existing Azure Monitor for containers release on the K8s cluster provided via --kube-context +# Prerequisites : +# Azure CLI: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest +# Helm3 : https://helm.sh/docs/intro/install/ + +# download script +# curl -o upgrade-monitoring.sh -L https://aka.ms/upgrade-monitoring-bash-script +# 1. Using Service Principal for Azure Login +## bash upgrade-monitoring.sh --resource-id <clusterResourceId> --client-id <sp client id> --client-secret <sp client secret> --tenant-id <sp tenant id> +# 2.
Using Interactive device login +# bash upgrade-monitoring.sh --resource-id <clusterResourceId> + +set -e +set -o pipefail + +# released chart version for Azure Arc enabled Kubernetes public preview +mcrChartVersion="2.7.6" +mcr="mcr.microsoft.com" +mcrChartRepoPath="azuremonitor/containerinsights/preview/azuremonitor-containers" + +# default to public cloud since only supported cloud is azure public cloud +defaultAzureCloud="AzureCloud" +helmLocalRepoName="." +helmChartName="azuremonitor-containers" + +# default release name used during onboarding +releaseName="azmon-containers-release-1" + +# resource provider for azure arc connected cluster +arcK8sResourceProvider="Microsoft.Kubernetes/connectedClusters" + +# default of resourceProvider is Azure Arc enabled Kubernetes and this will get updated based on the provider cluster resource +resourceProvider="Microsoft.Kubernetes/connectedClusters" + +# Azure Arc enabled Kubernetes cluster resource +isArcK8sCluster=false + +# openshift project name for aro v4 cluster +openshiftProjectName="azure-monitor-for-containers" + +# ARO v4 cluster resource +isAroV4Cluster=false + +# default global params +clusterResourceId="" +kubeconfigContext="" + +# default workspace region and code +workspaceRegion="eastus" +workspaceRegionCode="EUS" +workspaceResourceGroup="DefaultResourceGroup-"$workspaceRegionCode + +# default workspace guid and key +workspaceGuid="" +workspaceKey="" + +# sp details for the login if provided +servicePrincipalClientId="" +servicePrincipalClientSecret="" +servicePrincipalTenantId="" +isUsingServicePrincipal=false + +usage() { + local basename=$(basename $0) + echo + echo "Upgrade Azure Monitor for containers:" + echo "$basename --resource-id [--client-id ] [--client-secret ] [--tenant-id ] [--kube-context ]" +} + +parse_args() { + + if [ $# -le 1 ]; then + usage + exit 1 + fi + + # Transform long options to short ones + for arg in "$@"; do + shift + case "$arg" in + "--resource-id") set -- "$@" "-r" ;; +
"--kube-context") set -- "$@" "-k" ;; + "--client-id") set -- "$@" "-c" ;; + "--client-secret") set -- "$@" "-s" ;; + "--tenant-id") set -- "$@" "-t" ;; + "--"*) usage ;; + *) set -- "$@" "$arg" ;; + esac + done + + local OPTIND opt + + while getopts 'hk:r:c:s:t:' opt; do + case "$opt" in + h) + usage + ;; + + k) + kubeconfigContext="$OPTARG" + echo "name of kube-context is $OPTARG" + ;; + + r) + clusterResourceId="$OPTARG" + echo "clusterResourceId is $OPTARG" + ;; + + c) + servicePrincipalClientId="$OPTARG" + echo "servicePrincipalClientId is $OPTARG" + ;; + + s) + servicePrincipalClientSecret="$OPTARG" + echo "clientSecret is *****" + ;; + + t) + servicePrincipalTenantId="$OPTARG" + echo "service principal tenantId is $OPTARG" + ;; + + ?) + usage + exit 1 + ;; + esac + done + shift "$(($OPTIND - 1))" + + local subscriptionId="$(echo ${clusterResourceId} | cut -d'/' -f3)" + local resourceGroup="$(echo ${clusterResourceId} | cut -d'/' -f5)" + + # get resource parts and join back to get the provider name + local providerNameResourcePart1="$(echo ${clusterResourceId} | cut -d'/' -f7)" + local providerNameResourcePart2="$(echo ${clusterResourceId} | cut -d'/' -f8)" + local providerName="$(echo ${providerNameResourcePart1}/${providerNameResourcePart2})" + + local clusterName="$(echo ${clusterResourceId} | cut -d'/' -f9)" + + # convert to lowercase for validation + providerName=$(echo $providerName | tr "[:upper:]" "[:lower:]") + + echo "cluster SubscriptionId:" $subscriptionId + echo "cluster ResourceGroup:" $resourceGroup + echo "cluster ProviderName:" $providerName + echo "cluster Name:" $clusterName + + if [ -z "$subscriptionId" -o -z "$resourceGroup" -o -z "$providerName" -o -z "$clusterName" ]; then + echo "-e invalid cluster resource id. Please try with valid fully qualified resource id of the cluster" + exit 1 + fi + + if [[ $providerName != microsoft.* ]]; then + echo "-e invalid azure cluster resource id format." 
+ exit 1 + fi + + # detect the resource provider from the provider name in the cluster resource id + if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then + echo "provider cluster resource is of Azure Arc enabled Kubernetes cluster type" + isArcK8sCluster=true + resourceProvider=$arcK8sResourceProvider + elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then + echo "provider cluster resource is of AROv4 cluster type" + resourceProvider=$aroV4ResourceProvider + isAroV4Cluster=true + elif [ $providerName = "microsoft.containerservice/managedclusters" ]; then + echo "provider cluster resource is of AKS cluster type" + isAksCluster=true + resourceProvider=$aksResourceProvider + else + echo "-e unsupported azure managed cluster type" + exit 1 + fi + + if [ -z "$kubeconfigContext" ]; then + echo "using or getting current kube config context since --kube-context parameter not set " + fi + + if [ ! -z "$servicePrincipalClientId" -a ! -z "$servicePrincipalClientSecret" -a ! 
-z "$servicePrincipalTenantId" ]; then + echo "using service principal creds (clientId, secret and tenantId) for azure login since provided" + isUsingServicePrincipal=true + fi +} + +configure_to_public_cloud() { + echo "Set AzureCloud as active cloud for az cli" + az cloud set -n $defaultAzureCloud +} + +validate_cluster_identity() { + echo "validating cluster identity" + + local rgName="$(echo ${1})" + local clusterName="$(echo ${2})" + + local identitytype=$(az resource show -g ${rgName} -n ${clusterName} --resource-type $resourceProvider --query identity.type) + identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"') + echo "cluster identity type:" $identitytype + + if [[ "$identitytype" != "systemassigned" ]]; then + echo "-e only supported cluster identity is systemassigned for Azure Arc enabled Kubernetes cluster type" + exit 1 + fi + + echo "successfully validated the identity of the cluster" +} + +validate_monitoring_tags() { + echo "get loganalyticsworkspaceResourceId tag on to cluster resource" + logAnalyticsWorkspaceResourceIdTag=$(az resource show --query tags.logAnalyticsWorkspaceResourceId -g $clusterResourceGroup -n $clusterName --resource-type $resourceProvider) + echo "configured log analytics workspace: ${logAnalyticsWorkspaceResourceIdTag}" + echo "successfully got logAnalyticsWorkspaceResourceId tag on the cluster resource" + if [ -z "$logAnalyticsWorkspaceResourceIdTag" ]; then + echo "-e logAnalyticsWorkspaceResourceId doesnt exist on this cluster which indicates cluster not enabled for monitoring" + exit 1 + fi +} + + +upgrade_helm_chart_release() { + + # get the config-context for ARO v4 cluster + if [ "$isAroV4Cluster" = true ]; then + echo "getting config-context of ARO v4 cluster " + echo "getting admin user creds for aro v4 cluster" + adminUserName=$(az aro list-credentials -g $clusterResourceGroup -n $clusterName --query 'kubeadminUsername' -o tsv) + adminPassword=$(az aro list-credentials -g $clusterResourceGroup 
-n $clusterName --query 'kubeadminPassword' -o tsv) + apiServer=$(az aro show -g $clusterResourceGroup -n $clusterName --query apiserverProfile.url -o tsv) + echo "login to the cluster via oc login" + oc login $apiServer -u $adminUserName -p $adminPassword + echo "creating project azure-monitor-for-containers" + oc new-project $openshiftProjectName + echo "getting config-context of aro v4 cluster" + kubeconfigContext=$(oc config current-context) + fi + + if [ -z "$kubeconfigContext" ]; then + echo "installing Azure Monitor for containers HELM chart on to the cluster and using current kube context ..." + else + echo "installing Azure Monitor for containers HELM chart on to the cluster with kubecontext:${kubeconfigContext} ..." + fi + + export HELM_EXPERIMENTAL_OCI=1 + + echo "pull the chart from ${mcr}/${mcrChartRepoPath}:${mcrChartVersion}" + helm chart pull ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} + + echo "export the chart from local cache to current directory" + helm chart export ${mcr}/${mcrChartRepoPath}:${mcrChartVersion} --destination . + + helmChartRepoPath=$helmLocalRepoName/$helmChartName + + echo "upgrading the release: $releaseName to chart version : ${mcrChartVersion}" + helm get values $releaseName -o yaml | helm upgrade --install $releaseName $helmChartRepoPath -f - + echo "$releaseName got upgraded successfully." 
+} + +login_to_azure() { + if [ "$isUsingServicePrincipal" = true ]; then + echo "login to the azure using provided service principal creds" + az login --service-principal --username $servicePrincipalClientId --password $servicePrincipalClientSecret --tenant $servicePrincipalTenantId + else + echo "login to the azure interactively" + az login --use-device-code + fi +} + +set_azure_subscription() { + local subscriptionId="$(echo ${1})" + echo "setting the subscription id: ${subscriptionId} as current subscription for the azure cli" + az account set -s ${subscriptionId} + echo "successfully configured subscription id: ${subscriptionId} as current subscription for the azure cli" +} + +# parse and validate args +parse_args $@ + +# configure azure cli for public cloud +configure_to_public_cloud + +# parse cluster resource id +clusterSubscriptionId="$(echo $clusterResourceId | cut -d'/' -f3 | tr "[:upper:]" "[:lower:]")" +clusterResourceGroup="$(echo $clusterResourceId | cut -d'/' -f5)" +providerName="$(echo $clusterResourceId | cut -d'/' -f7)" +clusterName="$(echo $clusterResourceId | cut -d'/' -f9)" + +# login to azure +login_to_azure + +# set the cluster subscription id as active sub for azure cli +set_azure_subscription $clusterSubscriptionId + +# validate cluster identity if its Azure Arc enabled Kubernetes cluster +if [ "$isArcK8sCluster" = true ]; then + validate_cluster_identity $clusterResourceGroup $clusterName +fi + +# validate the cluster has monitoring tags +validate_monitoring_tags + +# upgrade helm chart release +upgrade_helm_chart_release + +# portal link +echo "Proceed to https://aka.ms/azmon-containers to view health of your newly onboarded cluster"