Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
#!/bin/bash
# push the helm chart as an OCI artifact to specified ACR
# working directory of this script should be charts/azuremonitor-containers

export REPO_PATH="batch1/test/azure-monitor-containers"
# note: this repo registered in arc k8s extension for canary region
export REPO_PATH="public/azuremonitor/containerinsights/canary/preview/azuremonitor-containers"

export HELM_EXPERIMENTAL_OCI=1

for ARGUMENT in "$@"
Expand All @@ -11,32 +12,33 @@ do
VALUE=$(echo $ARGUMENT | cut -f2 -d=)

case "$KEY" in
CIARCACR) CIARCACR=$VALUE ;;
CIACR) CIACR=$VALUE ;;
CICHARTVERSION) CHARTVERSION=$VALUE ;;
*)
esac
done

echo "CI ARC K8S ACR: ${CIARCACR}"
echo "CI ARC K8S ACR: ${CIACR}"
echo "CI HELM CHART VERSION: ${CHARTVERSION}"

echo "start: read appid and appsecret"
ACR_APP_ID=$(cat ~/acrappid)
ACR_APP_SECRET=$(cat ~/acrappsecret)
echo "end: read appid and appsecret"

ACR=${CIARCACR}
ACR=${CIACR}

echo "login to acr:${ACR} using helm"
helm registry login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET

echo "login to acr:${ACR} using oras"
oras login $ACR --username $ACR_APP_ID --password $ACR_APP_SECRET
echo "login to acr:${ACR} completed: ${ACR}"

echo "start: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}"

echo "generate helm package"
helm package .
echo "save the chart locally with acr full path"
helm chart save . ${ACR}/${REPO_PATH}:${CHARTVERSION}

echo "pushing the helm chart as an OCI artifact"
oras push ${ACR}/${REPO_PATH}:${CHARTVERSION} --manifest-config /dev/null:application/vnd.unknown.config.v1+json ./azuremonitor-containers-${CHARTVERSION}.tgz:application/tar+gzip
echo "pushing the helm chart to ACR: ${ACR}"
helm chart push ${ACR}/${REPO_PATH}:${CHARTVERSION}

echo "end: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}"
53 changes: 53 additions & 0 deletions .pipelines/push-helm-chart-to-prod-repos.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
#!/bin/bash
# Pushes the azuremonitor-containers helm chart as an OCI artifact to the
# production ACR repos: the standalone public-preview repo and the repo
# registered with the Arc K8s extension for prod group1 regions.
# Working directory of this script should be charts/azuremonitor-containers.
#
# Usage: push-helm-chart-to-prod-repos.sh CIACR=<acr-host> CICHARTVERSION=<version>
# Requires: helm v3 with experimental OCI support, and service principal
# credentials in ~/acrappid and ~/acrappsecret.

# fail fast: abort on any command failure, unset variable, or pipeline error
set -euo pipefail

# repo used for the standalone (non-extension) public preview release
export PROD_REPO_PATH="public/azuremonitor/containerinsights/preview/azuremonitor-containers"

# note: this repo registered in arc k8s extension for prod group1 regions.
export EXTENSION_PROD_REPO_PATH="public/azuremonitor/containerinsights/prod1/preview/azuremonitor-containers"

# enable experimental OCI artifact support in helm 3
export HELM_EXPERIMENTAL_OCI=1

# parse KEY=VALUE style arguments
for ARGUMENT in "$@"; do
  KEY=$(echo "$ARGUMENT" | cut -f1 -d=)
  VALUE=$(echo "$ARGUMENT" | cut -f2 -d=)

  case "$KEY" in
    CIACR) CIACR=$VALUE ;;
    CICHARTVERSION) CHARTVERSION=$VALUE ;;
    *) ;;
  esac
done

# validate required arguments before doing any work
: "${CIACR:?CIACR=<acr-host> argument is required}"
: "${CHARTVERSION:?CICHARTVERSION=<chart-version> argument is required}"

echo "CI ARC K8S ACR: ${CIACR}"
echo "CI HELM CHART VERSION: ${CHARTVERSION}"

echo "start: read appid and appsecret"
ACR_APP_ID=$(cat ~/acrappid)
ACR_APP_SECRET=$(cat ~/acrappsecret)
echo "end: read appid and appsecret"

ACR=${CIACR}

echo "login to acr:${ACR} using helm"
# NOTE(review): the secret is visible in the process list while this runs;
# consider switching to --password-stdin if the pipeline's helm version supports it
helm registry login "${ACR}" --username "${ACR_APP_ID}" --password "${ACR_APP_SECRET}"

echo "login to acr:${ACR} completed: ${ACR}"

echo "start: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}"

# save the chart locally under each full ACR reference, then push both
echo "save the chart locally with acr full path: ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION}"
helm chart save . "${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION}"

echo "save the chart locally with acr full path: ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION}"
helm chart save . "${ACR}/${PROD_REPO_PATH}:${CHARTVERSION}"

echo "pushing the helm chart to ACR: ${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION}"
helm chart push "${ACR}/${EXTENSION_PROD_REPO_PATH}:${CHARTVERSION}"

echo "pushing the helm chart to ACR: ${ACR}/${PROD_REPO_PATH}:${CHARTVERSION}"
helm chart push "${ACR}/${PROD_REPO_PATH}:${CHARTVERSION}"

echo "end: push the chart version: ${CHARTVERSION} to acr repo: ${ACR}"
5 changes: 4 additions & 1 deletion ReleaseProcess.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,10 @@ Make PR against [AKS-Engine](https://github.com/Azure/aks-engine). Refer PR http

## ARO v4, On-prem K8s, Azure Arc K8s and OpenShift v4 clusters

Make PR against [HELM-charts](https://github.com/helm/charts) with Azure Monitor for containers chart update.
Make sure the azuremonitor-containers chart YAMLs are updated with all changes going out with the release, and also make sure to bump the chart version, image tag, and Docker provider version. As with the agent container image, the build pipeline automatically pushes the chart to the Container Insights prod ACR, to the canary and prod repos accordingly.
Both the agent and helm chart will be replicated to `mcr.microsoft.com`.

Customers onboard monitoring to these clusters using the onboarding scripts under the `onboarding\managed` directory, so please bump the chart version for the prod release. Once we move to the Arc K8s Monitoring extension public preview, this will be handled automatically, so no manual changes like this will be required.

# 4. Monitor agent roll-out status

Expand Down
2 changes: 1 addition & 1 deletion charts/azuremonitor-containers/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ apiVersion: v1
appVersion: 7.0.0-1
description: Helm chart for deploying Azure Monitor container monitoring agent in Kubernetes
name: azuremonitor-containers
version: 2.7.4
version: 2.7.6
kubeVersion: "^1.10.0-0"
keywords:
- monitoring
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@ spec:
agentVersion: {{ .Values.omsagent.image.tagWindows }}
dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }}
schema-versions: "v1"
checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }}
checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }}
spec:
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion }}
nodeSelector:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,9 @@ spec:
agentVersion: {{ .Values.omsagent.image.tag }}
dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }}
schema-versions: "v1"
checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }}
checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }}
checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }}
spec:
{{- if .Values.omsagent.rbac }}
serviceAccountName: omsagent
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,9 @@ spec:
agentVersion: {{ .Values.omsagent.image.tag }}
dockerProviderVersion: {{ .Values.omsagent.image.dockerProviderVersion }}
schema-versions: "v1"
checksum/secret: {{ include (print $.Template.BasePath "/omsagent-secret.yaml") . | sha256sum }}
checksum/config: {{ toYaml .Values.omsagent.resources | sha256sum }}
checksum/logsettings: {{ toYaml .Values.omsagent.logsettings | sha256sum }}
spec:
{{- if .Values.omsagent.rbac }}
serviceAccountName: omsagent
Expand Down
6 changes: 3 additions & 3 deletions charts/azuremonitor-containers/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,10 @@ Azure:
omsagent:
image:
repo: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod"
tag: "ciprod09162020"
tagWindows: "win-ciprod09162020"
tag: "ciprod09252020"
tagWindows: "win-ciprod09252020"
pullPolicy: IfNotPresent
dockerProviderVersion: "10.0.0-5"
dockerProviderVersion: "10.0.0-6"
agentVersion: "1.10.0.1"
## To get your workspace id and key do the following
## You can create a Azure Loganalytics workspace from portal.azure.com and get its ID & PRIMARY KEY from 'Advanced Settings' tab in the Ux.
Expand Down
12 changes: 4 additions & 8 deletions scripts/onboarding/managed/disable-monitoring.ps1
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
<#
.DESCRIPTION

Disables Azure Monitor for containers to monitoring enabled Azure Managed K8s cluster such as Azure Arc K8s, ARO v4 and AKS etc.
Disables Azure Monitor for containers on a monitoring-enabled Azure Managed K8s cluster such as Azure Arc enabled Kubernetes, ARO v4, AKS, etc.
1. Deletes the existing Azure Monitor for containers helm release
2. Deletes logAnalyticsWorkspaceResourceId tag on the provided Managed cluster

.PARAMETER clusterResourceId
Id of the Azure Managed Cluster such as Azure ARC K8s, ARO v4 etc.
Id of the Azure Managed Cluster such as Azure Arc enabled Kubernetes, ARO v4 etc.
.PARAMETER servicePrincipalClientId
client Id of the service principal which will be used for the azure login
.PARAMETER servicePrincipalClientSecret
Expand All @@ -18,7 +18,7 @@

Pre-requisites:
- Azure Managed cluster Resource Id
- Contributor role permission on the Subscription of the Azure Arc Cluster
- Contributor role permission on the Subscription of the Azure Arc enabled Kubernetes Cluster
- Helm v3.0.0 or higher https://github.com/helm/helm/releases
- kube-context of the K8s cluster
Note: 1. Please make sure you have all the pre-requisistes before running this script.
Expand Down Expand Up @@ -298,7 +298,7 @@ if ($isArcK8sCluster -eq $true) {
# validate identity
$clusterIdentity = $clusterResource.identity.type.ToString().ToLower()
if ($clusterIdentity.Contains("systemassigned") -eq $false) {
Write-Host("Identity of Azure Arc K8s cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red
Write-Host("Identity of Azure Arc enabled Kubernetes cluster should be systemassigned but it has identity: $clusterIdentity") -ForegroundColor Red
exit
}
}
Expand Down Expand Up @@ -354,7 +354,3 @@ catch {
}

Write-Host("Successfully disabled Azure Monitor for containers for cluster: $clusteResourceId") -ForegroundColor Green




10 changes: 5 additions & 5 deletions scripts/onboarding/managed/disable-monitoring.sh
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,10 @@ set -o pipefail

# default release name used during onboarding
releaseName="azmon-containers-release-1"
# resource type for azure arc clusters
# resource type for Azure Arc enabled Kubernetes clusters
resourceProvider="Microsoft.Kubernetes/connectedClusters"

# resource provider for azure arc connected cluster
# resource provider for Azure Arc enabled Kubernetes cluster
arcK8sResourceProvider="Microsoft.Kubernetes/connectedClusters"
# resource provider for azure redhat openshift v4 cluster
aroV4ResourceProvider="Microsoft.RedHatOpenShift/OpenShiftClusters"
Expand Down Expand Up @@ -125,13 +125,13 @@ remove_monitoring_tags()
echo "set the cluster subscription id: ${clusterSubscriptionId}"
az account set -s ${clusterSubscriptionId}

# validate cluster identity for ARC k8s cluster
# validate cluster identity for Azure Arc enabled Kubernetes cluster
if [ "$isArcK8sCluster" = true ] ; then
identitytype=$(az resource show -g ${clusterResourceGroup} -n ${clusterName} --resource-type $resourceProvider --query identity.type)
identitytype=$(echo $identitytype | tr "[:upper:]" "[:lower:]" | tr -d '"')
echo "cluster identity type:" $identitytype
if [[ "$identitytype" != "systemassigned" ]]; then
echo "-e only supported cluster identity is systemassigned for Azure ARC K8s cluster type"
echo "-e only supported cluster identity is systemassigned for Azure Arc enabled Kubernetes cluster type"
exit 1
fi
fi
Expand Down Expand Up @@ -257,7 +257,7 @@ done

# detect the resource provider from the provider name in the cluster resource id
if [ $providerName = "microsoft.kubernetes/connectedclusters" ]; then
echo "provider cluster resource is of Azure ARC K8s cluster type"
echo "provider cluster resource is of Azure Arc enabled Kubernetes cluster type"
isArcK8sCluster=true
resourceProvider=$arcK8sResourceProvider
elif [ $providerName = "microsoft.redhatopenshift/openshiftclusters" ]; then
Expand Down
Loading