diff --git a/ReleaseNotes.md b/ReleaseNotes.md index 0c51b737c..dc42e7d51 100644 --- a/ReleaseNotes.md +++ b/ReleaseNotes.md @@ -11,6 +11,19 @@ additional questions or comments. Note : The agent version(s) below has dates (ciprod), which indicate the agent build dates (not release dates) +### 08/05/2021 - +##### Version microsoft/oms:ciprod08052021 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021 (linux) +##### Code change log +- Linux Agent + - Fix for CPU spike which occurs at around 6.30am UTC every day because of unattended package upgrades + - Update MDSD build which has fixes for the following issues + - Nondeterministic core dump issue because of the non-200 status code and runtime exception stack unwindings + - Reduce the verbosity of the error logs for OMS & ODS code paths. + - Increase Timeout for OMS Homing service API calls from 30s to 60s + - Fix for https://github.com/Azure/AKS/issues/2457 + - In replicaset, tailing of the mdsd.err log file to agent telemetry + + ### 07/13/2021 - ##### Version microsoft/oms:win-ciprod06112021-2 Version mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021-2 (windows) ##### Code change log diff --git a/kubernetes/linux/Dockerfile b/kubernetes/linux/Dockerfile index b47841757..07af7f4a7 100644 --- a/kubernetes/linux/Dockerfile +++ b/kubernetes/linux/Dockerfile @@ -2,7 +2,7 @@ FROM ubuntu:18.04 MAINTAINER OMSContainers@microsoft.com LABEL vendor=Microsoft\ Corp \ com.microsoft.product="Azure Monitor for containers" -ARG IMAGE_TAG=ciprod06112021 +ARG IMAGE_TAG=ciprod08052021 ENV AGENT_VERSION ${IMAGE_TAG} ENV tmpdir /opt ENV APPLICATIONINSIGHTS_AUTH NzAwZGM5OGYtYTdhZC00NThkLWI5NWMtMjA3ZjM3NmM3YmRi diff --git a/kubernetes/linux/setup.sh b/kubernetes/linux/setup.sh index b7cddffbc..df32afc7e 100644 --- a/kubernetes/linux/setup.sh +++ b/kubernetes/linux/setup.sh @@ -9,8 +9,8 @@ sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ dpkg-reconfigure 
--frontend=noninteractive locales && \ update-locale LANG=en_US.UTF-8 -#install oneagent - Official bits (06/24/2021) -wget https://github.com/microsoft/Docker-Provider/releases/download/06242021-oneagent/azure-mdsd_1.10.3-build.master.241_x86_64.deb +#install oneagent - Official bits (08/04/2021) +wget https://github.com/microsoft/Docker-Provider/releases/download/08042021-oneagent/azure-mdsd_1.10.1-build.master.251_x86_64.deb /usr/bin/dpkg -i $TMPDIR/azure-mdsd*.deb cp -f $TMPDIR/mdsd.xml /etc/mdsd.d @@ -47,8 +47,8 @@ sudo apt-get update sudo apt-get install td-agent-bit=1.6.8 -y # install ruby2.6 -sudo apt-get install software-properties-common -y -sudo apt-add-repository ppa:brightbox/ruby-ng -y +sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F5DA5F09C3173AA6 +sudo echo "deb http://ppa.launchpad.net/brightbox/ruby-ng/ubuntu bionic main" >> /etc/apt/sources.list sudo apt-get update sudo apt-get install ruby2.6 ruby2.6-dev gcc make -y # fluentd v1 gem @@ -62,6 +62,9 @@ rm -f $TMPDIR/azure-mdsd*.deb rm -f $TMPDIR/mdsd.xml rm -f $TMPDIR/envmdsd +# remove build dependencies +sudo apt-get remove ruby2.6-dev gcc make -y + # Remove settings for cron.daily that conflict with the node's cron.daily. Since both are trying to rotate the same files # in /var/log at the same time, the rotation doesn't happen correctly and then the *.1 file is forever logged to. 
rm /etc/logrotate.d/alternatives /etc/logrotate.d/apt /etc/logrotate.d/azure-mdsd /etc/logrotate.d/rsyslog diff --git a/kubernetes/omsagent.yaml b/kubernetes/omsagent.yaml index 855f3a8e1..49d4586c1 100644 --- a/kubernetes/omsagent.yaml +++ b/kubernetes/omsagent.yaml @@ -362,13 +362,13 @@ spec: schema-versions: "v1" spec: serviceAccountName: omsagent - dnsConfig: + dnsConfig: options: - name: ndots - value: "3" + value: "3" containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021" imagePullPolicy: IfNotPresent resources: limits: @@ -384,7 +384,7 @@ spec: - name: AKS_REGION value: "VALUE_AKS_RESOURCE_REGION_VALUE" # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests - - name: ISTEST + - name: ISTEST value: "true" #Uncomment below two lines for ACS clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters #- name: ACS_RESOURCE_NAME @@ -446,7 +446,7 @@ spec: timeoutSeconds: 15 #Only in sidecar scraping mode - name: omsagent-prometheus - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021" imagePullPolicy: IfNotPresent resources: limits: @@ -589,7 +589,7 @@ spec: serviceAccountName: omsagent containers: - name: omsagent - image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06112021" + image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08052021" imagePullPolicy: IfNotPresent resources: limits: @@ -604,8 +604,8 @@ spec: - name: AKS_REGION value: "VALUE_AKS_RESOURCE_REGION_VALUE" # this used for e2e test and setting this just emits some additional log statements which used for the e2e tests - - name: ISTEST - value: "true" + - name: ISTEST + value: "true" # Uncomment below two lines for ACS 
clusters and set the cluster names manually. Also comment out the above two lines for ACS clusters #- name: ACS_RESOURCE_NAME # value: "my_acs_cluster_name" @@ -754,10 +754,10 @@ spec: schema-versions: "v1" spec: serviceAccountName: omsagent - dnsConfig: + dnsConfig: options: - name: ndots - value: "3" + value: "3" containers: - name: omsagent-win image: "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:win-ciprod06112021-2" diff --git a/source/plugins/go/src/oms.go b/source/plugins/go/src/oms.go index a2937073b..91a5b4b40 100644 --- a/source/plugins/go/src/oms.go +++ b/source/plugins/go/src/oms.go @@ -165,17 +165,17 @@ var ( // ADX tenantID AdxTenantID string //ADX client secret - AdxClientSecret string + AdxClientSecret string // container log or container log v2 tag name for oneagent route - MdsdContainerLogTagName string + MdsdContainerLogTagName string // kubemonagent events tag name for oneagent route MdsdKubeMonAgentEventsTagName string // InsightsMetrics tag name for oneagent route - MdsdInsightsMetricsTagName string + MdsdInsightsMetricsTagName string // flag to check if its Windows OS IsWindows bool - // container type - ContainerType string + // container type + ContainerType string // flag to check whether LA AAD MSI Auth Enabled or not IsAADMSIAuthMode bool ) @@ -206,7 +206,7 @@ var ( // IngestionAuthTokenUpdateMutex read and write mutex access for ODSIngestionAuthToken IngestionAuthTokenUpdateMutex = &sync.Mutex{} // ODSIngestionAuthToken for windows agent AAD MSI Auth - ODSIngestionAuthToken string + ODSIngestionAuthToken string ) var ( @@ -355,12 +355,12 @@ const ( ) // DataType to be used as enum per data type socket client creation -type DataType int +type DataType int const ( // DataType to be used as enum per data type socket client creation ContainerLogV2 DataType = iota - KubeMonAgentEvents - InsightsMetrics + KubeMonAgentEvents + InsightsMetrics ) func createLogger() *log.Logger { @@ -610,7 +610,7 @@ func 
flushKubeMonAgentEventRecords() { Message: k, Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) var stringMap map[string]string jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) if err != nil { @@ -623,10 +623,10 @@ func flushKubeMonAgentEventRecords() { Log(message) SendException(message) } else { - msgPackEntry := MsgPackEntry{ + msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) + msgPackEntries = append(msgPackEntries, msgPackEntry) } } } @@ -649,23 +649,23 @@ func flushKubeMonAgentEventRecords() { Message: k, Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) var stringMap map[string]string jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) if err != nil { message := fmt.Sprintf("Error while Marshalling laKubeMonAgentEventsRecord to json bytes: %s", err.Error()) Log(message) SendException(message) - } else { - if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { + } else { + if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { message := fmt.Sprintf("Error while UnMarhalling json bytes to stringmap: %s", err.Error()) Log(message) SendException(message) } else { - msgPackEntry := MsgPackEntry{ + msgPackEntry := MsgPackEntry{ Record: stringMap, - } - msgPackEntries = append(msgPackEntries, msgPackEntry) + } + msgPackEntries = append(msgPackEntries, msgPackEntry) } } } @@ -698,66 +698,66 @@ func flushKubeMonAgentEventRecords() { Message: "No errors", Tags: fmt.Sprintf("%s", tagJson), } - laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) + laKubeMonAgentEventsRecords = 
append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord) var stringMap map[string]string jsonBytes, err := json.Marshal(&laKubeMonAgentEventsRecord) - if err != nil { + if err != nil { message := fmt.Sprintf("Error while Marshalling laKubeMonAgentEventsRecord to json bytes: %s", err.Error()) Log(message) SendException(message) } else { - if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { + if err := json.Unmarshal(jsonBytes, &stringMap); err != nil { message := fmt.Sprintf("Error while UnMarshalling json bytes to stringmap: %s", err.Error()) Log(message) SendException(message) - } else { - msgPackEntry := MsgPackEntry{ + } else { + msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) + msgPackEntries = append(msgPackEntries, msgPackEntry) } } } } - if (IsWindows == false && len(msgPackEntries) > 0) { //for linux, mdsd route + if (IsWindows == false && len(msgPackEntries) > 0) { //for linux, mdsd route if IsAADMSIAuthMode == true && strings.HasPrefix(MdsdKubeMonAgentEventsTagName, MdsdOutputStreamIdTagPrefix) == false { Log("Info::mdsd::obtaining output stream id for data type: %s", KubeMonAgentEventDataType) MdsdKubeMonAgentEventsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(KubeMonAgentEventDataType) - } + } Log("Info::mdsd:: using mdsdsource name for KubeMonAgentEvents: %s", MdsdKubeMonAgentEventsTagName) - msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdKubeMonAgentEventsTagName, msgPackEntries) + msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdKubeMonAgentEventsTagName, msgPackEntries) if MdsdKubeMonMsgpUnixSocketClient == nil { Log("Error::mdsd::mdsd connection for KubeMonAgentEvents does not exist. re-connecting ...") CreateMDSDClient(KubeMonAgentEvents, ContainerType) if MdsdKubeMonMsgpUnixSocketClient == nil { - Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. 
Please check error log.") + Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. Please check error log.") ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() - KubeMonEventsMDSDClientCreateErrors += 1 - } + KubeMonEventsMDSDClientCreateErrors += 1 + } } - if MdsdKubeMonMsgpUnixSocketClient != nil { + if MdsdKubeMonMsgpUnixSocketClient != nil { deadline := 10 * time.Second - MdsdKubeMonMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse + MdsdKubeMonMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse bts, er := MdsdKubeMonMsgpUnixSocketClient.Write(msgpBytes) - elapsed = time.Since(start) + elapsed = time.Since(start) if er != nil { message := fmt.Sprintf("Error::mdsd::Failed to write to kubemonagent mdsd %d records after %s. Will retry ... error : %s", len(msgPackEntries), elapsed, er.Error()) Log(message) if MdsdKubeMonMsgpUnixSocketClient != nil { MdsdKubeMonMsgpUnixSocketClient.Close() MdsdKubeMonMsgpUnixSocketClient = nil - } + } SendException(message) } else { numRecords := len(msgPackEntries) Log("FlushKubeMonAgentEventRecords::Info::Successfully flushed %d records that was %d bytes in %s", numRecords, bts, elapsed) // Send telemetry to AppInsights resource SendEvent(KubeMonAgentEventsFlushedEvent, telemetryDimensions) - } + } } else { - Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. Please check error log.") - } + Log("Error::mdsd::Unable to create mdsd client for KubeMonAgentEvents. 
Please check error log.") + } } else if len(laKubeMonAgentEventsRecords) > 0 { //for windows, ODS direct kubeMonAgentEventEntry := KubeMonAgentEventBlob{ DataType: KubeMonAgentEventDataType, @@ -784,10 +784,10 @@ func flushKubeMonAgentEventRecords() { if IsAADMSIAuthMode == true { IngestionAuthTokenUpdateMutex.Lock() ingestionAuthToken := ODSIngestionAuthToken - IngestionAuthTokenUpdateMutex.Unlock() - if ingestionAuthToken == "" { - Log("Error::ODS Ingestion Auth Token is empty. Please check error log.") - } + IngestionAuthTokenUpdateMutex.Unlock() + if ingestionAuthToken == "" { + Log("Error::ODS Ingestion Auth Token is empty. Please check error log.") + } req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) } @@ -900,15 +900,15 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int message := fmt.Sprintf("PostTelegrafMetricsToLA::Info:derived %v metrics from %v timeseries", len(laMetrics), len(telegrafRecords)) Log(message) } - + if IsWindows == false { //for linux, mdsd route - var msgPackEntries []MsgPackEntry + var msgPackEntries []MsgPackEntry var i int start := time.Now() var elapsed time.Duration - for i = 0; i < len(laMetrics); i++ { - var interfaceMap map[string]interface{} + for i = 0; i < len(laMetrics); i++ { + var interfaceMap map[string]interface{} stringMap := make(map[string]string) jsonBytes, err := json.Marshal(*laMetrics[i]) if err != nil { @@ -917,35 +917,35 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int SendException(message) return output.FLB_OK } else { - if err := json.Unmarshal(jsonBytes, &interfaceMap); err != nil { + if err := json.Unmarshal(jsonBytes, &interfaceMap); err != nil { message := fmt.Sprintf("Error while UnMarshalling json bytes to interfaceMap: %s", err.Error()) Log(message) SendException(message) return output.FLB_OK - } else { + } else { for key, value := range interfaceMap { strKey := fmt.Sprintf("%v", key) strValue := fmt.Sprintf("%v", value) 
stringMap[strKey] = strValue - } - msgPackEntry := MsgPackEntry{ + } + msgPackEntry := MsgPackEntry{ Record: stringMap, } - msgPackEntries = append(msgPackEntries, msgPackEntry) - } + msgPackEntries = append(msgPackEntries, msgPackEntry) + } } } - if (len(msgPackEntries) > 0) { + if (len(msgPackEntries) > 0) { if IsAADMSIAuthMode == true && (strings.HasPrefix(MdsdInsightsMetricsTagName, MdsdOutputStreamIdTagPrefix) == false) { Log("Info::mdsd::obtaining output stream id for InsightsMetricsDataType since Log Analytics AAD MSI Auth Enabled") MdsdInsightsMetricsTagName = extension.GetInstance(FLBLogger, ContainerType).GetOutputStreamId(InsightsMetricsDataType) - } - msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdInsightsMetricsTagName, msgPackEntries) + } + msgpBytes := convertMsgPackEntriesToMsgpBytes(MdsdInsightsMetricsTagName, msgPackEntries) if MdsdInsightsMetricsMsgpUnixSocketClient == nil { Log("Error::mdsd::mdsd connection does not exist. re-connecting ...") CreateMDSDClient(InsightsMetrics, ContainerType) if MdsdInsightsMetricsMsgpUnixSocketClient == nil { - Log("Error::mdsd::Unable to create mdsd client for insights metrics. Please check error log.") + Log("Error::mdsd::Unable to create mdsd client for insights metrics. 
Please check error log.") ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() InsightsMetricsMDSDClientCreateErrors += 1 @@ -954,7 +954,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int } deadline := 10 * time.Second - MdsdInsightsMetricsMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse + MdsdInsightsMetricsMsgpUnixSocketClient.SetWriteDeadline(time.Now().Add(deadline)) //this is based of clock time, so cannot reuse bts, er := MdsdInsightsMetricsMsgpUnixSocketClient.Write(msgpBytes) elapsed = time.Since(start) @@ -969,7 +969,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int ContainerLogTelemetryMutex.Lock() defer ContainerLogTelemetryMutex.Unlock() - InsightsMetricsMDSDClientCreateErrors += 1 + InsightsMetricsMDSDClientCreateErrors += 1 return output.FLB_RETRY } else { numTelegrafMetricsRecords := len(msgPackEntries) @@ -977,7 +977,7 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int Log("Success::mdsd::Successfully flushed %d telegraf metrics records that was %d bytes to mdsd in %s ", numTelegrafMetricsRecords, bts, elapsed) } } - + } else { // for windows, ODS direct var metrics []laTelegrafMetric @@ -1019,9 +1019,9 @@ func PostTelegrafMetricsToLA(telegrafRecords []map[interface{}]interface{}) int if IsAADMSIAuthMode == true { IngestionAuthTokenUpdateMutex.Lock() ingestionAuthToken := ODSIngestionAuthToken - IngestionAuthTokenUpdateMutex.Unlock() - if ingestionAuthToken == "" { - message := "Error::ODS Ingestion Auth Token is empty. Please check error log." + IngestionAuthTokenUpdateMutex.Unlock() + if ingestionAuthToken == "" { + message := "Error::ODS Ingestion Auth Token is empty. Please check error log." 
Log(message) return output.FLB_RETRY } @@ -1232,7 +1232,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords := 0 if len(msgPackEntries) > 0 && ContainerLogsRouteV2 == true { - //flush to mdsd + //flush to mdsd if IsAADMSIAuthMode == true && strings.HasPrefix(MdsdContainerLogTagName, MdsdOutputStreamIdTagPrefix) == false { Log("Info::mdsd::obtaining output stream id") if ContainerLogSchemaV2 == true { @@ -1242,7 +1242,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } Log("Info::mdsd:: using mdsdsource name: %s", MdsdContainerLogTagName) } - + fluentForward := MsgPackForward{ Tag: MdsdContainerLogTagName, Entries: msgPackEntries, @@ -1359,7 +1359,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { numContainerLogRecords = len(dataItemsADX) Log("Success::ADX::Successfully wrote %d container log records to ADX in %s", numContainerLogRecords, elapsed) - } else { //ODS + } else if ((ContainerLogSchemaV2 == true && len(dataItemsLAv2) > 0) || len(dataItemsLAv1) > 0) { //ODS var logEntry interface{} recordType := "" loglinesCount := 0 @@ -1401,19 +1401,19 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { if ResourceCentric == true { req.Header.Set("x-ms-AzureResourceId", ResourceID) } - + if IsAADMSIAuthMode == true { IngestionAuthTokenUpdateMutex.Lock() ingestionAuthToken := ODSIngestionAuthToken IngestionAuthTokenUpdateMutex.Unlock() - if ingestionAuthToken == "" { - Log("Error::ODS Ingestion Auth Token is empty. Please check error log.") + if ingestionAuthToken == "" { + Log("Error::ODS Ingestion Auth Token is empty. 
Please check error log.") return output.FLB_RETRY } // add authorization header to the req req.Header.Set("Authorization", "Bearer "+ingestionAuthToken) - } - + } + resp, err := HTTPClient.Do(req) elapsed = time.Since(start) @@ -1422,7 +1422,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { Log(message) // Commenting this out for now. TODO - Add better telemetry for ods errors using aggregation //SendException(message) - + Log("Failed to flush %d records after %s", loglinesCount, elapsed) return output.FLB_RETRY @@ -1510,7 +1510,7 @@ func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, str } // InitializePlugin reads and populates plugin configuration -func InitializePlugin(pluginConfPath string, agentVersion string) { +func InitializePlugin(pluginConfPath string, agentVersion string) { go func() { isTest := os.Getenv("ISTEST") if strings.Compare(strings.ToLower(strings.TrimSpace(isTest)), "true") == 0 { @@ -1550,10 +1550,10 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } ContainerType = os.Getenv(ContainerTypeEnv) - Log("Container Type %s", ContainerType) + Log("Container Type %s", ContainerType) osType := os.Getenv("OS_TYPE") - IsWindows = false + IsWindows = false // Linux if strings.Compare(strings.ToLower(osType), "windows") != 0 { Log("Reading configuration for Linux from %s", pluginConfPath) @@ -1572,7 +1572,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { SendException(message) time.Sleep(30 * time.Second) log.Fatalln(message) - } + } OMSEndpoint = "https://" + WorkspaceID + ".ods." 
+ LogAnalyticsWorkspaceDomain + "/OperationalData.svc/PostJsonDataItems" // Populate Computer field containerHostName, err1 := ioutil.ReadFile(pluginConfig["container_host_file_path"]) @@ -1602,7 +1602,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } } else { // windows - IsWindows = true + IsWindows = true Computer = os.Getenv("HOSTNAME") WorkspaceID = os.Getenv("WSID") logAnalyticsDomain := os.Getenv("DOMAIN") @@ -1614,7 +1614,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { IsAADMSIAuthMode = false if strings.Compare(strings.ToLower(os.Getenv(AADMSIAuthMode)), "true") == 0 { IsAADMSIAuthMode = true - Log("AAD MSI Auth Mode Configured") + Log("AAD MSI Auth Mode Configured") } ResourceID = os.Getenv(envAKSResourceID) @@ -1689,13 +1689,13 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log(message) } - PluginConfiguration = pluginConfig + PluginConfiguration = pluginConfig ContainerLogsRoute := strings.TrimSpace(strings.ToLower(os.Getenv("AZMON_CONTAINER_LOGS_ROUTE"))) Log("AZMON_CONTAINER_LOGS_ROUTE:%s", ContainerLogsRoute) - ContainerLogsRouteV2 = false - ContainerLogsRouteADX = false + ContainerLogsRouteV2 = false + ContainerLogsRouteADX = false if strings.Compare(ContainerLogsRoute, ContainerLogsADXRoute) == 0 { //check if adx clusteruri, clientid & secret are set @@ -1728,14 +1728,14 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { Log("Routing container logs thru %s route...", ContainerLogsADXRoute) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route...\n", ContainerLogsADXRoute) } - } else if strings.Compare(strings.ToLower(osType), "windows") != 0 { //for linux, oneagent will be default route + } else if strings.Compare(strings.ToLower(osType), "windows") != 0 { //for linux, oneagent will be default route ContainerLogsRouteV2 = true //default is mdsd route - if strings.Compare(ContainerLogsRoute, ContainerLogsV1Route) == 0 { + if 
strings.Compare(ContainerLogsRoute, ContainerLogsV1Route) == 0 { ContainerLogsRouteV2 = false //fallback option when hiddensetting set } Log("Routing container logs thru %s route...", ContainerLogsRoute) fmt.Fprintf(os.Stdout, "Routing container logs thru %s route... \n", ContainerLogsRoute) - } + } if ContainerLogsRouteV2 == true { CreateMDSDClient(ContainerLogV2, ContainerType) @@ -1748,7 +1748,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { if IsWindows == false { // mdsd linux specific Log("Creating MDSD clients for KubeMonAgentEvents & InsightsMetrics") - CreateMDSDClient(KubeMonAgentEvents, ContainerType) + CreateMDSDClient(KubeMonAgentEvents, ContainerType) CreateMDSDClient(InsightsMetrics, ContainerType) } @@ -1787,7 +1787,7 @@ func InitializePlugin(pluginConfPath string, agentVersion string) { } MdsdInsightsMetricsTagName = MdsdInsightsMetricsSourceName - MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName + MdsdKubeMonAgentEventsTagName = MdsdKubeMonAgentEventsSourceName Log("ContainerLogsRouteADX: %v, IsWindows: %v, IsAADMSIAuthMode = %v \n", ContainerLogsRouteADX, IsWindows, IsAADMSIAuthMode) if !ContainerLogsRouteADX && IsWindows && IsAADMSIAuthMode { Log("defaultIngestionAuthTokenRefreshIntervalSeconds = %d \n", defaultIngestionAuthTokenRefreshIntervalSeconds)