diff --git a/installer/conf/telegraf-rs.conf b/installer/conf/telegraf-rs.conf index bde27f279..8e8665104 100644 --- a/installer/conf/telegraf-rs.conf +++ b/installer/conf/telegraf-rs.conf @@ -542,7 +542,7 @@ ## An array of urls to scrape metrics from. #urls = ["http://$NODE_IP:10255/metrics", "http://$NODE_IP:10255/metrics/cadvisor", "http://$NODE_IP:10254/metrics", "http://$NODE_IP:9100/metrics"] #fieldpass = ["kubelet_docker_operations", "kubelet_docker_operations_errors"] - interval: "$AZMON_RS_PROM_INTERVAL" + interval = "$AZMON_RS_PROM_INTERVAL" ## An array of urls to scrape metrics from. urls = ["$AZMON_RS_PROM_URLS"] diff --git a/installer/conf/telegraf-test-rs.conf b/installer/conf/telegraf-test-rs.conf new file mode 100644 index 000000000..4ece2bf8c --- /dev/null +++ b/installer/conf/telegraf-test-rs.conf @@ -0,0 +1,113 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "60s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. 
+ metric_batch_size = 1000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "60s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. + debug = false + ## Run telegraf in quiet mode (error log messages only). + quiet = false + ## Specify the log file name. The empty string means to log to stderr. 
+ logfile = "/var/opt/microsoft/docker-cimprov/log/telegraf.log" + + ## Override default hostname, if empty use os.Hostname() + #hostname = "placeholder_hostname" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = true + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + +#Prometheus Custom Metrics +[[inputs.prometheus]] + ## An array of urls to scrape metrics from. + interval = "$AZMON_RS_PROM_INTERVAL" + + ## An array of urls to scrape metrics from. + #urls = ["http://$NODE_IP:10255/metrics", "http://$NODE_IP:10255/metrics/cadvisor", "http://$NODE_IP:10254/metrics", "http://$NODE_IP:9100/metrics"] + urls = ["$AZMON_RS_PROM_URLS"] + + #fieldpass = ["kubelet_docker_operations", "kubelet_docker_operations_errors"] + fieldpass = ["$AZMON_RS_PROM_FIELDPASS"] + fielddrop = ["$AZMON_RS_PROM_FIELDDROP"] + + ## An array of Kubernetes services to scrape metrics from. + #kubernetes_services = ["https://kube-state-metrics.monitoring:8443/metrics","https://kube-state-metrics.monitoring:9443/metrics","http://oce-scc-template-nginx-ingress-controller.oce-nginx:10254/metrics"] + kubernetes_services = ["$AZMON_RS_PROM_K8S_SERVICES"] + + ## Scrape Kubernetes pods for the following prometheus annotations: + ## - prometheus.io/scrape: Enable scraping for this pod + ## - prometheus.io/scheme: If the metrics endpoint is secured then you will need to + ## set this to `https` & most likely set the tls config. + ## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation. + ## - prometheus.io/port: If port is not 9102 use this annotation + monitor_kubernetes_pods = $AZMON_RS_PROM_MONITOR_PODS + + metric_version = 2 + url_tag = "scrapeUrl" + + ## Use bearer token for authorization. 
('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + ## Use TLS but skip chain & host verification + insecure_skip_verify = true diff --git a/installer/conf/telegraf-test.conf b/installer/conf/telegraf-test.conf new file mode 100644 index 000000000..f1a7880ad --- /dev/null +++ b/installer/conf/telegraf-test.conf @@ -0,0 +1,100 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. +# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "60s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). 
+ metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "60s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default or when set to "0s", precision will be set to the same + ## timestamp order as the collection interval, with the maximum being 1s. + ## ie, when interval = "10s", precision will be "1s" + ## when interval = "250ms", precision will be "1ms" + ## Precision will NOT be used for service inputs. It is up to each individual + ## service input to set the timestamp at the appropriate precision. + ## Valid time units are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. + debug = false + ## Run telegraf in quiet mode (error log messages only). + quiet = false + ## Specify the log file name. The empty string means to log to stderr. + logfile = "/var/opt/microsoft/docker-cimprov/log/telegraf.log" + + ## Override default hostname, if empty use os.Hostname() + #hostname = "placeholder_hostname" + ## If set to true, do no set the "host" tag in the telegraf agent. 
+ omit_hostname = true + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + +#Prometheus Custom Metrics +[[inputs.prometheus]] + ## An array of urls to scrape metrics from. + interval = "$AZMON_DS_PROM_INTERVAL" + + ## An array of urls to scrape metrics from. + #urls = ["http://$NODE_IP:10255/metrics", "http://$NODE_IP:10255/metrics/cadvisor", "http://$NODE_IP:10254/metrics", "http://$NODE_IP:9100/metrics"] + urls = ["$AZMON_DS_PROM_URLS"] + + fieldpass = ["$AZMON_DS_PROM_FIELDPASS"] + fielddrop = ["$AZMON_DS_PROM_FIELDDROP"] + + metric_version = 2 + url_tag = "scrapeUrl" + + ## Use bearer token for authorization. ('bearer_token' takes priority) + bearer_token = "/var/run/secrets/kubernetes.io/serviceaccount/token" + + ## Specify timeout duration for slower prometheus clients (default is 3s) + response_timeout = "15s" + + ## Optional TLS Config + tls_ca = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + ## Use TLS but skip chain & host verification + insecure_skip_verify = true diff --git a/installer/conf/telegraf.conf b/installer/conf/telegraf.conf index fa1d72ea7..a83db55cf 100644 --- a/installer/conf/telegraf.conf +++ b/installer/conf/telegraf.conf @@ -572,7 +572,7 @@ ## prometheus custom metrics [[inputs.prometheus]] - interval: "$AZMON_DS_PROM_INTERVAL" + interval = "$AZMON_DS_PROM_INTERVAL" ## An array of urls to scrape metrics from. 
urls = ["$AZMON_DS_PROM_URLS"] diff --git a/installer/datafiles/base_container.data b/installer/datafiles/base_container.data index 58a74aa0a..5a18805be 100644 --- a/installer/datafiles/base_container.data +++ b/installer/datafiles/base_container.data @@ -110,9 +110,12 @@ MAINTAINER: 'Microsoft Corporation' /etc/opt/microsoft/docker-cimprov/out_oms.conf; installer/conf/out_oms.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/telegraf.conf; installer/conf/telegraf.conf; 644; root; root /etc/opt/microsoft/docker-cimprov/telegraf-rs.conf; installer/conf/telegraf-rs.conf; 644; root; root -/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root +/opt/telegraf-test.conf; installer/conf/telegraf-test.conf; 644; root; root +/opt/telegraf-test-rs.conf; installer/conf/telegraf-test-rs.conf; 644; root; root +/opt/microsoft/docker-cimprov/bin/TelegrafTCPErrorTelemetry.sh; installer/scripts/TelegrafTCPErrorTelemetry.sh; 755; root; root /opt/livenessprobe.sh; installer/scripts/livenessprobe.sh; 755; root; root /opt/tomlparser.rb; installer/scripts/tomlparser.rb; 755; root; root +/opt/tomlparser-prom-customconfig.rb; installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root %Links /opt/omi/lib/libcontainer.${{SHLIB_EXT}}; /opt/microsoft/docker-cimprov/lib/libcontainer.${{SHLIB_EXT}}; 644; root; root diff --git a/installer/scripts/tomlparser-prom-customconfig.rb b/installer/scripts/tomlparser-prom-customconfig.rb new file mode 100644 index 000000000..5df83c89a --- /dev/null +++ b/installer/scripts/tomlparser-prom-customconfig.rb @@ -0,0 +1,184 @@ +#!/usr/local/bin/ruby + +require_relative "tomlrb" + +@promConfigMapMountPath = "/etc/config/settings/prometheus-data-collection-settings" +@replicaset = "replicaset" +@daemonset = "daemonset" +@configSchemaVersion = "" + +# Use parser to parse the configmap toml file to a ruby structure +def parseConfigMap + begin + # Check to see if config map is 
created + if (File.file?(@promConfigMapMountPath)) + puts "config::configmap container-azm-ms-agentconfig for settings mounted, parsing values for prometheus config map" + parsedConfig = Tomlrb.load_file(@promConfigMapMountPath, symbolize_keys: true) + puts "config::Successfully parsed mounted prometheus config map" + return parsedConfig + else + puts "config::configmap container-azm-ms-agentconfig for settings not mounted, using defaults for prometheus scraping" + return nil + end + rescue => errorStr + puts "config::error::Exception while parsing toml config file for prometheus config: #{errorStr}, using defaults" + return nil + end +end + +def checkForTypeArray(arrayValue, arrayType) + if !arrayValue.nil? && arrayValue.kind_of?(Array) && arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType) + return true + else + return false + end +end + +def checkForType(variable, varType) + if !variable.nil? && variable.kind_of?(varType) + return true + else + return false + end +end + +# Use the ruby structure created after config parsing to set the right values to be used as environment variables +def populateSettingValuesFromConfigMap(parsedConfig) + # Checking to see if this is the daemonset or replicaset to parse config accordingly + controller = ENV["CONTROLLER_TYPE"] + if !controller.nil? + if !parsedConfig.nil? && !parsedConfig[:prometheus_data_collection_settings].nil? + if controller.casecmp(@replicaset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:cluster].nil? 
+ #Get prometheus replicaset custom config settings + begin + interval = parsedConfig[:prometheus_data_collection_settings][:cluster][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:cluster][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:cluster][:fielddrop] + urls = parsedConfig[:prometheus_data_collection_settings][:cluster][:urls] + kubernetesServices = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_services] + monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods] + + # Check for the right datatypes to enforce right setting values + if checkForType(interval, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + checkForTypeArray(kubernetesServices, String) && + checkForTypeArray(urls, String) && + !monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby + puts "config::Successfully passed typecheck for config settings for replicaset" + # Write the settings to file, so that they can be set as environment variables + file = File.open("prom_config_env_var", "w") + if !file.nil? 
+ file.write("export AZMON_RS_PROM_INTERVAL=#{interval}\n") + file.write("export TELEMETRY_RS_PROM_INTERVAL=\"#{interval}\"\n") + file.write("export AZMON_RS_PROM_FIELDPASS=\"#{fieldPass.join("\",\"")}\"\n") + #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_RS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") + file.write("export AZMON_RS_PROM_FIELDDROP=#{fieldDrop.join("\",\"")}\n") + file.write("export TELEMETRY_RS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") + file.write("export AZMON_RS_PROM_K8S_SERVICES=#{kubernetesServices.join("\",\"")}\n") + file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n") + file.write("export AZMON_RS_PROM_URLS=#{urls.join("\",\"")}\n") + file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n") + file.write("export AZMON_RS_PROM_MONITOR_PODS=#{monitorKubernetesPods}\n") + file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n") + # Close file after writing all environment variables + file.close + puts "config::Successfully created custom config environment variable file for replicaset" + + #Also substitute these values in the test config file for telegraf + file_name = "telegraf-test-rs.conf" + text = File.read(file_name) + new_contents = text.gsub("$AZMON_RS_PROM_INTERVAL", interval) + new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDPASS", fieldPass.join("\",\"")) + new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDDROP", fieldDrop.join("\",\"")) + new_contents = new_contents.gsub("$AZMON_RS_PROM_URLS", urls.join("\",\"")) + new_contents = new_contents.gsub("$AZMON_RS_PROM_K8S_SERVICES", kubernetesServices.join("\",\"")) + new_contents = new_contents.gsub("$AZMON_RS_PROM_MONITOR_PODS", (monitorKubernetesPods ? 
"true" : "false")) + + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully replaced the settings in test telegraf config file for replicaset" + else + puts "config::error::Exception while opening file for writing prometheus replicaset config environment variables" + puts "****************End Prometheus Config Processing********************" + end + else + puts "config::Typecheck failed for prometheus config settings for replicaset, using defaults" + end # end of type check condition + rescue => errorStr + puts "config::error::Exception while reading config file for prometheus config for replicaset: #{errorStr}, using defaults" + puts "****************End Prometheus Config Processing********************" + end + elsif controller.casecmp(@daemonset) == 0 && !parsedConfig[:prometheus_data_collection_settings][:node].nil? + #Get prometheus daemonset custom config settings + begin + interval = parsedConfig[:prometheus_data_collection_settings][:node][:interval] + fieldPass = parsedConfig[:prometheus_data_collection_settings][:node][:fieldpass] + fieldDrop = parsedConfig[:prometheus_data_collection_settings][:node][:fielddrop] + urls = parsedConfig[:prometheus_data_collection_settings][:node][:urls] + + # Check for the right datattypes to enforce right setting values + if checkForType(interval, String) && + checkForTypeArray(fieldPass, String) && + checkForTypeArray(fieldDrop, String) && + checkForTypeArray(urls, String) + puts "config::Successfully passed typecheck for config settings for daemonset" + # Write the settings to file, so that they can be set as environment variables + file = File.open("prom_config_env_var", "w") + if !file.nil? 
+ file.write("export AZMON_DS_PROM_INTERVAL=#{interval}\n") + file.write("export TELEMETRY_DS_PROM_INTERVAL=\"#{interval}\"\n") + file.write("export AZMON_DS_PROM_FIELDPASS=\"#{fieldPass.join("\",\"")}\"\n") + #Setting array lengths as environment variables for telemetry purposes + file.write("export TELEMETRY_DS_PROM_FIELDPASS_LENGTH=\"#{fieldPass.length}\"\n") + file.write("export AZMON_DS_PROM_FIELDDROP=#{fieldDrop.join("\",\"")}\n") + file.write("export TELEMETRY_DS_PROM_FIELDDROP_LENGTH=\"#{fieldDrop.length}\"\n") + file.write("export AZMON_DS_PROM_URLS=#{urls.join("\",\"")}\n") + file.write("export TELEMETRY_DS_PROM_URLS_LENGTH=#{urls.length}\n") + # Close file after writing all environment variables + file.close + puts "config::Successfully created custom config environment variable file for daemonset" + + #Also substitute these values in the test config file for telegraf + file_name = "telegraf-test.conf" + text = File.read(file_name) + new_contents = text.gsub("$AZMON_DS_PROM_INTERVAL", interval) + new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDPASS", fieldPass.join("\",\"")) + new_contents = new_contents.gsub("$AZMON_DS_PROM_FIELDDROP", fieldDrop.join("\",\"")) + new_contents = new_contents.gsub("$AZMON_DS_PROM_URLS", urls.join("\",\"")) + # To write changes to the file, use: + File.open(file_name, "w") { |file| file.puts new_contents } + puts "config::Successfully replaced the settings in test telegraf config file for daemonset" + else + puts "config::error::Exception while opening file for writing prometheus daemonset config environment variables" + puts "****************End Prometheus Config Processing********************" + end + else + puts "config::Typecheck failed for prometheus config settings for daemonset, using defaults" + end # end of type check condition + rescue => errorStr + puts "config::error::Exception while reading config file for prometheus config for daemonset: #{errorStr}, using defaults" + puts "****************End Prometheus 
Config Processing********************" + end + end # end of controller type check + end + else + puts "config::error:: Controller undefined while processing prometheus config, using defaults" + end +end + +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Prometheus Config Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@promConfigMapMountPath)) + puts "config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults" + else + puts "config::No configmap mounted for prometheus custom config, using defaults" + end +end +puts "****************End Prometheus Config Processing********************" diff --git a/installer/scripts/tomlparser.rb b/installer/scripts/tomlparser.rb index 3e7f48045..c72e64127 100644 --- a/installer/scripts/tomlparser.rb +++ b/installer/scripts/tomlparser.rb @@ -82,7 +82,7 @@ def populateSettingValuesFromConfigMap(parsedConfig) if @collectStderrLogs && !stderrNamespaces.nil? if stderrNamespaces.kind_of?(Array) if !@stdoutExcludeNamespaces.nil? && !@stdoutExcludeNamespaces.empty? - stdoutNamespaces = @stdoutExcludeNamespaces.split(',') + stdoutNamespaces = @stdoutExcludeNamespaces.split(",") end # Checking only for the first element to be string because toml enforces the arrays to contain elements of same type if stderrNamespaces.length > 0 && stderrNamespaces[0].kind_of?(String) @@ -119,47 +119,47 @@ def populateSettingValuesFromConfigMap(parsedConfig) end end - @configSchemaVersion = ENV['AZMON_AGENT_CFG_SCHEMA_VERSION'] - puts "****************Start Config Processing********************" - if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? 
&& @configSchemaVersion.strip.casecmp('v1') == 0 #note v1 is the only supported schema version , so hardcoding it - configMapSettings = parseConfigMap - if !configMapSettings.nil? - populateSettingValuesFromConfigMap(configMapSettings) - end - else - if (File.file?(@configMapMountPath)) - puts "config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults" - end - @excludePath = "*_kube-system_*.log" +@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"] +puts "****************Start Config Processing********************" +if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it + configMapSettings = parseConfigMap + if !configMapSettings.nil? + populateSettingValuesFromConfigMap(configMapSettings) + end +else + if (File.file?(@configMapMountPath)) + puts "config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults" end + @excludePath = "*_kube-system_*.log" +end - # Write the settings to file, so that they can be set as environment variables - file = File.open("config_env_var", "w") +# Write the settings to file, so that they can be set as environment variables +file = File.open("config_env_var", "w") - if !file.nil? 
- # This will be used in td-agent-bit.conf file to filter out logs - if (!@collectStdoutLogs && !@collectStderrLogs) - #Stop log tailing completely - @logTailPath = "/opt/nolog*.log" - @logExclusionRegexPattern = "stdout|stderr" - elsif !@collectStdoutLogs - @logExclusionRegexPattern = "stdout" - elsif !@collectStderrLogs - @logExclusionRegexPattern = "stderr" - end - file.write("export AZMON_COLLECT_STDOUT_LOGS=#{@collectStdoutLogs}\n") - file.write("export AZMON_LOG_TAIL_PATH=#{@logTailPath}\n") - file.write("export AZMON_LOG_EXCLUSION_REGEX_PATTERN=\"#{@logExclusionRegexPattern}\"\n") - file.write("export AZMON_STDOUT_EXCLUDED_NAMESPACES=#{@stdoutExcludeNamespaces}\n") - file.write("export AZMON_COLLECT_STDERR_LOGS=#{@collectStderrLogs}\n") - file.write("export AZMON_STDERR_EXCLUDED_NAMESPACES=#{@stderrExcludeNamespaces}\n") - file.write("export AZMON_CLUSTER_COLLECT_ENV_VAR=#{@collectClusterEnvVariables}\n") - file.write("export AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH=#{@excludePath}\n") - # Close file after writing all environment variables - file.close - puts "Both stdout & stderr log collection are turned off for namespaces: '#{@excludePath}' " - puts "****************End Config Processing********************" - else - puts "config::error::Exception while opening file for writing config environment variables" - puts "****************End Config Processing********************" +if !file.nil? 
+ # This will be used in td-agent-bit.conf file to filter out logs + if (!@collectStdoutLogs && !@collectStderrLogs) + #Stop log tailing completely + @logTailPath = "/opt/nolog*.log" + @logExclusionRegexPattern = "stdout|stderr" + elsif !@collectStdoutLogs + @logExclusionRegexPattern = "stdout" + elsif !@collectStderrLogs + @logExclusionRegexPattern = "stderr" end + file.write("export AZMON_COLLECT_STDOUT_LOGS=#{@collectStdoutLogs}\n") + file.write("export AZMON_LOG_TAIL_PATH=#{@logTailPath}\n") + file.write("export AZMON_LOG_EXCLUSION_REGEX_PATTERN=\"#{@logExclusionRegexPattern}\"\n") + file.write("export AZMON_STDOUT_EXCLUDED_NAMESPACES=#{@stdoutExcludeNamespaces}\n") + file.write("export AZMON_COLLECT_STDERR_LOGS=#{@collectStderrLogs}\n") + file.write("export AZMON_STDERR_EXCLUDED_NAMESPACES=#{@stderrExcludeNamespaces}\n") + file.write("export AZMON_CLUSTER_COLLECT_ENV_VAR=#{@collectClusterEnvVariables}\n") + file.write("export AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH=#{@excludePath}\n") + # Close file after writing all environment variables + file.close + puts "Both stdout & stderr log collection are turned off for namespaces: '#{@excludePath}' " + puts "****************End Config Processing********************" +else + puts "config::error::Exception while opening file for writing config environment variables" + puts "****************End Config Processing********************" +end