diff --git a/installer/conf/container-health.conf b/installer/conf/container-health.conf
deleted file mode 100644
index e6edf41df..000000000
--- a/installer/conf/container-health.conf
+++ /dev/null
@@ -1,103 +0,0 @@
-# Fluentd config file for OMS Docker - container components (non kubeAPI)
-
-# Forward port 25225 for container logs
-
- type forward
- port 25225
- bind 127.0.0.1
-
-
-# Container inventory
-
- type containerinventory
- tag oms.containerinsights.containerinventory
- run_interval 60s
- log_level debug
-
-
-#cadvisor perf
-
- type cadvisorperf
- tag oms.api.cadvisorperf
- run_interval 60s
- log_level debug
-
-
-
- type filter_cadvisor_health_node
- log_level debug
-
-
-
-#custom_metrics_mdm filter plugin
-
- type filter_cadvisor2mdm
- custom_metrics_azure_regions eastus,southcentralus,westcentralus,westus2,southeastasia,northeurope,westeurope,southafricanorth,centralus,northcentralus,eastus2,koreacentral,eastasia,centralindia,uksouth,canadacentral
- metrics_to_collect cpuUsageNanoCores,memoryWorkingSetBytes,memoryRssBytes
- log_level info
-
-
-
- type out_oms
- log_level debug
- num_threads 5
- buffer_chunk_limit 20m
- buffer_type file
- buffer_path %STATE_DIR_WS%/out_oms_containerinventory*.buffer
- buffer_queue_limit 20
- buffer_queue_full_action drop_oldest_chunk
- flush_interval 20s
- retry_limit 10
- retry_wait 30s
- max_retry_wait 9m
-
-
-
- type out_oms
- log_level debug
- num_threads 5
- buffer_chunk_limit 20m
- buffer_type file
- buffer_path %STATE_DIR_WS%/out_oms_cadvisorperf*.buffer
- buffer_queue_limit 20
- buffer_queue_full_action drop_oldest_chunk
- flush_interval 20s
- retry_limit 10
- retry_wait 30s
- max_retry_wait 9m
-
-
-
-
- @type forward
- send_timeout 60s
- recover_wait 10s
- hard_timeout 60s
- heartbeat_type tcp
-
-
- host healthmodel-replicaset-service.kube-system
- port 25227
-
-
-
- @type file
- path /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log
-
-
-
-
- type out_mdm
- log_level debug
- num_threads 5
- buffer_chunk_limit 20m
- buffer_type file
- buffer_path %STATE_DIR_WS%/out_mdm_cdvisorperf*.buffer
- buffer_queue_limit 20
- buffer_queue_full_action drop_oldest_chunk
- flush_interval 20s
- retry_limit 10
- retry_wait 30s
- max_retry_wait 9m
- retry_mdm_post_wait_minutes 60
-
diff --git a/installer/conf/container.conf b/installer/conf/container.conf
index e68e4ff64..5f08043c7 100755
--- a/installer/conf/container.conf
+++ b/installer/conf/container.conf
@@ -23,6 +23,16 @@
log_level debug
+
+ type filter_cadvisor_health_node
+ log_level debug
+
+
+
+ type filter_cadvisor_health_container
+ log_level debug
+
+
#custom_metrics_mdm filter plugin
type filter_cadvisor2mdm
@@ -61,6 +71,27 @@
max_retry_wait 9m
+
+
+ @type health_forward
+ send_timeout 60s
+ recover_wait 10s
+ hard_timeout 60s
+ heartbeat_type tcp
+ skip_network_error_at_init true
+ expire_dns_cache 600s
+
+
+ host "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_HOST']}"
+ port "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_PORT']}"
+
+
+
+ @type file
+ path /var/opt/microsoft/docker-cimprov/log/fluent_forward_failed.log
+
+
+
type out_mdm
log_level debug
diff --git a/installer/conf/health_model_definition.json b/installer/conf/health_model_definition.json
index 1112fe158..e6c9e1808 100644
--- a/installer/conf/health_model_definition.json
+++ b/installer/conf/health_model_definition.json
@@ -23,6 +23,61 @@
"container.azm.ms/cluster-name"
]
},
+ {
+ "monitor_id": "container",
+ "labels": [
+ "container.azm.ms/namespace",
+ "container.azm.ms/workload-name",
+ "container.azm.ms/workload-kind",
+ "container.azm.ms/container",
+ "container.azm.ms/cluster-region",
+ "container.azm.ms/cluster-subscription-id",
+ "container.azm.ms/cluster-resource-group",
+ "container.azm.ms/cluster-name"
+ ],
+ "parent_monitor_id": [
+ {
+ "label": "container.azm.ms/namespace",
+ "operator": "==",
+ "value": "kube-system",
+ "id": "system_workload"
+ },
+ {
+ "label": "container.azm.ms/namespace",
+ "operator": "!=",
+ "value": "kube-system",
+ "id": "user_workload"
+ }
+ ]
+ },
+ {
+ "monitor_id": "container_cpu_utilization",
+ "parent_monitor_id": "container",
+ "labels": [
+ "container.azm.ms/namespace",
+ "container.azm.ms/workload-name",
+ "container.azm.ms/workload-kind",
+ "container.azm.ms/container",
+ "container.azm.ms/cluster-region",
+ "container.azm.ms/cluster-subscription-id",
+ "container.azm.ms/cluster-resource-group",
+ "container.azm.ms/cluster-name"
+ ]
+ },
+ {
+ "monitor_id": "container_memory_utilization",
+ "parent_monitor_id": "container",
+ "labels": [
+ "container.azm.ms/namespace",
+ "container.azm.ms/workload-name",
+ "container.azm.ms/workload-kind",
+ "container.azm.ms/container",
+ "container.azm.ms/cluster-region",
+ "container.azm.ms/cluster-subscription-id",
+ "container.azm.ms/cluster-resource-group",
+ "container.azm.ms/cluster-name"
+ ]
+ },
{
"monitor_id": "system_workload_pods_ready",
"parent_monitor_id": "system_workload",
@@ -104,6 +159,9 @@
"kubernetes.io/hostname",
"agentpool",
"kubernetes.io/role",
+ "node-role.kubernetes.io/master",
+ "node-role.kubernetes.io/compute",
+ "node-role.kubernetes.io/infra",
"container.azm.ms/cluster-region",
"container.azm.ms/cluster-subscription-id",
"container.azm.ms/cluster-resource-group",
@@ -117,6 +175,9 @@
"kubernetes.io/hostname",
"agentpool",
"kubernetes.io/role",
+ "node-role.kubernetes.io/master",
+ "node-role.kubernetes.io/compute",
+ "node-role.kubernetes.io/infra",
"container.azm.ms/cluster-region",
"container.azm.ms/cluster-subscription-id",
"container.azm.ms/cluster-resource-group",
@@ -130,6 +191,9 @@
"kubernetes.io/hostname",
"agentpool",
"kubernetes.io/role",
+ "node-role.kubernetes.io/master",
+ "node-role.kubernetes.io/compute",
+ "node-role.kubernetes.io/infra",
"container.azm.ms/cluster-region",
"container.azm.ms/cluster-subscription-id",
"container.azm.ms/cluster-resource-group",
@@ -143,12 +207,33 @@
"kubernetes.io/hostname",
"agentpool",
"kubernetes.io/role",
+ "node-role.kubernetes.io/master",
+ "node-role.kubernetes.io/compute",
+ "node-role.kubernetes.io/infra",
"container.azm.ms/cluster-region",
"container.azm.ms/cluster-subscription-id",
"container.azm.ms/cluster-resource-group",
"container.azm.ms/cluster-name"
],
"parent_monitor_id": [
+ {
+ "label": "node-role.kubernetes.io/master",
+ "operator": "==",
+ "value": "true",
+ "id": "master_node_pool"
+ },
+ {
+ "label": "node-role.kubernetes.io/compute",
+ "operator": "==",
+ "value": "true",
+ "id": "agent_node_pool"
+ },
+ {
+ "label": "node-role.kubernetes.io/infra",
+ "operator": "==",
+ "value": "true",
+ "id": "agent_node_pool"
+ },
{
"label": "kubernetes.io/role",
"operator": "==",
@@ -161,14 +246,16 @@
"value": "agent",
"id": "agent_node_pool"
}
- ]
+ ],
+ "default_parent_monitor_id": "agent_node_pool"
},
{
"monitor_id": "master_node_pool",
"aggregation_algorithm": "percentage",
"aggregation_algorithm_params": {
"critical_threshold": 80.0,
- "warning_threshold": 90.0
+ "warning_threshold": 90.0,
+ "state_threshold": 80.0
},
"parent_monitor_id": "all_nodes",
"labels": [
diff --git a/installer/conf/healthmonitorconfig.json b/installer/conf/healthmonitorconfig.json
index 28d562652..ea6b23856 100644
--- a/installer/conf/healthmonitorconfig.json
+++ b/installer/conf/healthmonitorconfig.json
@@ -2,30 +2,41 @@
"node_cpu_utilization": {
"WarnThresholdPercentage": 80.0,
"FailThresholdPercentage": 90.0,
- "ConsecutiveSamplesForStateTransition": 3
+ "ConsecutiveSamplesForStateTransition": 3,
+ "Operator": ">"
},
"node_memory_utilization": {
"WarnThresholdPercentage": 80.0,
"FailThresholdPercentage": 90.0,
- "ConsecutiveSamplesForStateTransition": 3
+ "ConsecutiveSamplesForStateTransition": 3,
+ "Operator": ">"
},
"container_cpu_utilization": {
"WarnThresholdPercentage": 80.0,
"FailThresholdPercentage": 90.0,
- "ConsecutiveSamplesForStateTransition": 3
+ "StateThresholdPercentage": 90.0,
+ "ConsecutiveSamplesForStateTransition": 3,
+ "Operator": ">"
},
"container_memory_utilization": {
"WarnThresholdPercentage": 80.0,
"FailThresholdPercentage": 90.0,
- "ConsecutiveSamplesForStateTransition": 3
+ "StateThresholdPercentage": 90.0,
+ "ConsecutiveSamplesForStateTransition": 3,
+ "Operator": ">"
},
"user_workload_pods_ready": {
- "WarnThresholdPercentage": 0.0,
- "FailThresholdPercentage": 10.0,
- "ConsecutiveSamplesForStateTransition": 2
+ "WarnThresholdPercentage": 100.0,
+ "FailThresholdPercentage": 90.0,
+ "ConsecutiveSamplesForStateTransition": 2,
+ "Operator": "<"
},
"system_workload_pods_ready": {
- "FailThresholdPercentage": 0.0,
- "ConsecutiveSamplesForStateTransition": 2
+ "FailThresholdPercentage": 100.0,
+ "ConsecutiveSamplesForStateTransition": 2,
+ "Operator": "<"
+ },
+ "node_condition": {
+ "NodeConditionTypesForFailedState": "outofdisk,networkunavailable"
}
}
\ No newline at end of file
diff --git a/installer/conf/kube.conf b/installer/conf/kube.conf
index 8e1f6ae88..40f4ac880 100644
--- a/installer/conf/kube.conf
+++ b/installer/conf/kube.conf
@@ -1,7 +1,7 @@
# Fluentd config file for OMS Docker - cluster components (kubeAPI)
type forward
- port 25227
+ port "#{ENV['HEALTHMODEL_REPLICASET_SERVICE_SERVICE_PORT']}"
bind 0.0.0.0
@@ -234,14 +234,17 @@
max_retry_wait 9m
-
- type out_oms_api
+
+ type out_oms
log_level debug
- buffer_chunk_limit 10m
+ num_threads 5
+ buffer_chunk_limit 20m
buffer_type file
- buffer_path %STATE_DIR_WS%/out_oms_api_KubeHealth*.buffer
- buffer_queue_limit 10
+ buffer_path %STATE_DIR_WS%/out_oms_kubehealth*.buffer
+ buffer_queue_limit 20
+ buffer_queue_full_action drop_oldest_chunk
flush_interval 20s
retry_limit 10
retry_wait 30s
+ max_retry_wait 9m
\ No newline at end of file
diff --git a/installer/conf/td-agent-bit.conf b/installer/conf/td-agent-bit.conf
index 4e3de6c46..6a1bf3e3e 100644
--- a/installer/conf/td-agent-bit.conf
+++ b/installer/conf/td-agent-bit.conf
@@ -28,6 +28,7 @@
Path /var/log/containers/omsagent*.log
DB /var/opt/microsoft/docker-cimprov/state/omsagent-ai.db
DB.Sync Off
+ Parser docker
Mem_Buf_Limit 1m
Path_Key filepath
Skip_Long_Lines On
@@ -51,7 +52,6 @@
[FILTER]
Name grep
Match oms.container.log.flbplugin.*
- Exclude log E! [\[]inputs.prometheus[\]]
[OUTPUT]
Name oms
diff --git a/installer/conf/telegraf-rs.conf b/installer/conf/telegraf-rs.conf
index ce60bfa04..3450ab88f 100644
--- a/installer/conf/telegraf-rs.conf
+++ b/installer/conf/telegraf-rs.conf
@@ -552,7 +552,7 @@
## set this to `https` & most likely set the tls config.
## - prometheus.io/path: If the metrics path is not /metrics, define it with this annotation.
## - prometheus.io/port: If port is not 9102 use this annotation
- monitor_kubernetes_pods = $AZMON_RS_PROM_MONITOR_PODS
+ $AZMON_RS_PROM_MONITOR_PODS
fieldpass = $AZMON_RS_PROM_FIELDPASS
fielddrop = $AZMON_RS_PROM_FIELDDROP
@@ -579,6 +579,7 @@
insecure_skip_verify = true
#tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"]
+$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER
# [[inputs.exec]]
# ## Commands array
# interval = "15m"
diff --git a/installer/conf/telegraf.conf b/installer/conf/telegraf.conf
index 4883de81b..cd22a56b4 100644
--- a/installer/conf/telegraf.conf
+++ b/installer/conf/telegraf.conf
@@ -566,7 +566,8 @@
## Use TLS but skip chain & host verification
insecure_skip_verify = true
#tagexclude = ["AgentVersion","AKS_RESOURCE_ID","ACS_RESOURCE_NAME", "Region", "ClusterName", "ClusterType", "Computer", "ControllerType"]
-
+ [inputs.prometheus.tagpass]
+ operation_type = ["create_container", "remove_container", "pull_image"]
## prometheus custom metrics
[[inputs.prometheus]]
diff --git a/installer/datafiles/base_container.data b/installer/datafiles/base_container.data
index 0ea3bc984..981f51f4c 100644
--- a/installer/datafiles/base_container.data
+++ b/installer/datafiles/base_container.data
@@ -30,7 +30,6 @@ MAINTAINER: 'Microsoft Corporation'
/opt/microsoft/omsagent/plugin/KubernetesApiClient.rb; source/code/plugin/KubernetesApiClient.rb; 644; root; root
/etc/opt/microsoft/docker-cimprov/container.conf; installer/conf/container.conf; 644; root; root
-/etc/opt/microsoft/docker-cimprov/container-health.conf; installer/conf/container-health.conf; 644; root; root
/opt/microsoft/omsagent/plugin/CAdvisorMetricsAPIClient.rb; source/code/plugin/CAdvisorMetricsAPIClient.rb; 644; root; root
/opt/microsoft/omsagent/plugin/in_kube_perf.rb; source/code/plugin/in_kube_perf.rb; 644; root; root
@@ -116,20 +115,23 @@ MAINTAINER: 'Microsoft Corporation'
/opt/tomlparser.rb; installer/scripts/tomlparser.rb; 755; root; root
/opt/tomlparser-prom-customconfig.rb; installer/scripts/tomlparser-prom-customconfig.rb; 755; root; root
/opt/td-agent-bit-conf-customizer.rb; installer/scripts/td-agent-bit-conf-customizer.rb; 755; root; root
+/opt/ConfigParseErrorLogger.rb; installer/scripts/ConfigParseErrorLogger.rb; 755; root; root
-
+/opt/microsoft/omsagent/plugin/filter_cadvisor_health_container.rb; source/code/plugin/filter_cadvisor_health_container.rb; 644; root; root
/opt/microsoft/omsagent/plugin/filter_cadvisor_health_node.rb; source/code/plugin/filter_cadvisor_health_node.rb; 644; root; root
/opt/microsoft/omsagent/plugin/filter_health_model_builder.rb; source/code/plugin/filter_health_model_builder.rb; 644; root; root
/opt/microsoft/omsagent/plugin/in_kube_health.rb; source/code/plugin/in_kube_health.rb; 644; root; root
+/opt/microsoft/omsagent/plugin/out_health_forward.rb; source/code/plugin/out_health_forward.rb; 644; root; root
/etc/opt/microsoft/docker-cimprov/health/healthmonitorconfig.json; installer/conf/healthmonitorconfig.json; 644; root; root
/etc/opt/microsoft/docker-cimprov/health/health_model_definition.json; installer/conf/health_model_definition.json; 644; root; root
-
/opt/microsoft/omsagent/plugin/health/aggregate_monitor.rb; source/code/plugin/health/aggregate_monitor.rb; 644; root; root
-/opt/microsoft/omsagent/plugin/health/agg_monitor_id_labels.rb; source/code/plugin/health/agg_monitor_id_labels.rb; 644; root; root
+/opt/microsoft/omsagent/plugin/health/agg_monitor_id_labels.rb; source/code/plugin/health/agg_monitor_id_labels.rb; 644; root; root
/opt/microsoft/omsagent/plugin/health/aggregate_monitor_state_finalizer.rb; source/code/plugin/health/aggregate_monitor_state_finalizer.rb; 644; root; root
/opt/microsoft/omsagent/plugin/health/cluster_health_state.rb; source/code/plugin/health/cluster_health_state.rb; 644; root; root
+/opt/microsoft/omsagent/plugin/health/health_container_cpu_memory_aggregator.rb; source/code/plugin/health/health_container_cpu_memory_aggregator.rb; 644; root; root
+/opt/microsoft/omsagent/plugin/health/health_container_cpu_memory_record_formatter.rb; source/code/plugin/health/health_container_cpu_memory_record_formatter.rb; 644; root; root
/opt/microsoft/omsagent/plugin/health/health_hierarchy_builder.rb; source/code/plugin/health/health_hierarchy_builder.rb; 644; root; root
/opt/microsoft/omsagent/plugin/health/health_kubernetes_resources.rb; source/code/plugin/health/health_kubernetes_resources.rb; 644; root; root
/opt/microsoft/omsagent/plugin/health/health_kube_api_down_handler.rb; source/code/plugin/health/health_kube_api_down_handler.rb; 644; root; root
@@ -258,9 +260,6 @@ chown omsagent:omiusers /var/opt/microsoft/docker-cimprov/log/fluent_forward_fai
mv /etc/opt/microsoft/docker-cimprov/container.conf /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf
chown omsagent:omsagent /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container.conf
-mv /etc/opt/microsoft/docker-cimprov/container-health.conf /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container-health.conf
-chown omsagent:omsagent /etc/opt/microsoft/omsagent/sysconf/omsagent.d/container-health.conf
-
%Postuninstall_10
# If we're an upgrade, skip all of this cleanup
if ${{PERFORMING_UPGRADE_NOT}}; then
@@ -272,7 +271,6 @@ if ${{PERFORMING_UPGRADE_NOT}}; then
rm -f /var/opt/microsoft/docker-cimprov/log/kubernetes_client_log.txt
rm -f /var/opt/microsoft/docker-cimprov/log/kubernetes_perf_log.txt
rm -f /etc/opt/microsoft/omsagent/conf/omsagent.d/container.conf
- rm -f /etc/opt/microsoft/omsagent/conf/omsagent.d/container-health.conf
rmdir /var/opt/microsoft/docker-cimprov/log 2> /dev/null
rmdir /var/opt/microsoft/docker-cimprov/state/ContainerInventory 2> /dev/null
rmdir /var/opt/microsoft/docker-cimprov/state/ImageInventory 2> /dev/null
diff --git a/installer/scripts/ConfigParseErrorLogger.rb b/installer/scripts/ConfigParseErrorLogger.rb
new file mode 100644
index 000000000..5d6db8016
--- /dev/null
+++ b/installer/scripts/ConfigParseErrorLogger.rb
@@ -0,0 +1,21 @@
+#!/usr/local/bin/ruby
+# frozen_string_literal: true
+
+class ConfigParseErrorLogger
+ require "json"
+
+ def initialize
+ end
+
+ class << self
+ def logError(message)
+ begin
+ errorMessage = "config::error::" + message
+ jsonMessage = errorMessage.to_json
+ STDERR.puts jsonMessage
+ rescue => errorStr
+      puts "Error in ConfigParseErrorLogger::logError: #{errorStr}"
+ end
+ end
+ end
+end
diff --git a/installer/scripts/td-agent-bit-conf-customizer.rb b/installer/scripts/td-agent-bit-conf-customizer.rb
index 1e62e3cc2..fae3acb36 100644
--- a/installer/scripts/td-agent-bit-conf-customizer.rb
+++ b/installer/scripts/td-agent-bit-conf-customizer.rb
@@ -1,4 +1,5 @@
#!/usr/local/bin/ruby
+require_relative "ConfigParseErrorLogger"
@td_agent_bit_conf_path = "/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf"
@@ -40,7 +41,7 @@ def substituteFluentBitPlaceHolders
File.open(@td_agent_bit_conf_path, "w") { |file| file.puts new_contents }
puts "config::Successfully substituted the placeholders in td-agent-bit.conf file"
rescue => errorStr
- puts "td-agent-bit-config-customizer: error while substituting values: #{errorStr}"
+ ConfigParseErrorLogger.logError("td-agent-bit-config-customizer: error while substituting values in td-agent-bit.conf file: #{errorStr}")
end
end
diff --git a/installer/scripts/tomlparser-prom-customconfig.rb b/installer/scripts/tomlparser-prom-customconfig.rb
index d9fdf1cc2..7aad580ee 100644
--- a/installer/scripts/tomlparser-prom-customconfig.rb
+++ b/installer/scripts/tomlparser-prom-customconfig.rb
@@ -1,6 +1,7 @@
#!/usr/local/bin/ruby
require_relative "tomlrb"
+require_relative "ConfigParseErrorLogger"
require "fileutils"
@promConfigMapMountPath = "/etc/config/settings/prometheus-data-collection-settings"
@@ -18,6 +19,14 @@
@defaultRsK8sServices = []
@defaultRsMonitorPods = false
+#Configurations to be used for the auto-generated input prometheus plugins for namespace filtering
+@metricVersion = 2
+@urlTag = "scrapeUrl"
+@bearerToken = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+@responseTimeout = "15s"
+@tlsCa = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+@insecureSkipVerify = true
+
# Use parser to parse the configmap toml file to a ruby structure
def parseConfigMap
begin
@@ -32,13 +41,13 @@ def parseConfigMap
return nil
end
rescue => errorStr
- puts "config::error::Exception while parsing toml config file for prometheus config: #{errorStr}, using defaults"
+ ConfigParseErrorLogger.logError("Exception while parsing config map for prometheus config: #{errorStr}, using defaults, please check config map for errors")
return nil
end
end
def checkForTypeArray(arrayValue, arrayType)
- if (arrayValue.nil? || (arrayValue.kind_of?(Array) && arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType)))
+ if (arrayValue.nil? || (arrayValue.kind_of?(Array) && ((arrayValue.length == 0) || (arrayValue.length > 0 && arrayValue[0].kind_of?(arrayType)))))
return true
else
return false
@@ -53,6 +62,48 @@ def checkForType(variable, varType)
end
end
+def replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods)
+ begin
+ new_contents = new_contents.gsub("$AZMON_RS_PROM_MONITOR_PODS", ("monitor_kubernetes_pods = #{monitorKubernetesPods}"))
+ new_contents = new_contents.gsub("$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER", "")
+ rescue => errorStr
+ puts "Exception while replacing default pod monitor settings: #{errorStr}"
+ end
+ return new_contents
+end
+
+def createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting)
+ begin
+ new_contents = new_contents.gsub("$AZMON_RS_PROM_MONITOR_PODS", "# Commenting this out since new plugins will be created per namespace\n # $AZMON_RS_PROM_MONITOR_PODS")
+ pluginConfigsWithNamespaces = ""
+ monitorKubernetesPodsNamespaces.each do |namespace|
+ if !namespace.nil?
+ #Stripping namespaces to remove leading and trailing whitespaces
+ namespace.strip!
+ if namespace.length > 0
+ pluginConfigsWithNamespaces += "\n[[inputs.prometheus]]
+ interval = \"#{interval}\"
+ monitor_kubernetes_pods = true
+ monitor_kubernetes_pods_namespace = \"#{namespace}\"
+ fieldpass = #{fieldPassSetting}
+ fielddrop = #{fieldDropSetting}
+ metric_version = #{@metricVersion}
+ url_tag = \"#{@urlTag}\"
+ bearer_token = \"#{@bearerToken}\"
+ response_timeout = \"#{@responseTimeout}\"
+ tls_ca = \"#{@tlsCa}\"
+ insecure_skip_verify = #{@insecureSkipVerify}\n"
+ end
+ end
+ end
+ new_contents = new_contents.gsub("$AZMON_RS_PROM_PLUGINS_WITH_NAMESPACE_FILTER", pluginConfigsWithNamespaces)
+ return new_contents
+ rescue => errorStr
+ puts "Exception while creating prometheus input plugins to filter namespaces: #{errorStr}, using defaults"
+ replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods)
+ end
+end
+
# Use the ruby structure created after config parsing to set the right values to be used as environment variables
def populateSettingValuesFromConfigMap(parsedConfig)
# Checking to see if this is the daemonset or replicaset to parse config accordingly
@@ -68,6 +119,7 @@ def populateSettingValuesFromConfigMap(parsedConfig)
urls = parsedConfig[:prometheus_data_collection_settings][:cluster][:urls]
kubernetesServices = parsedConfig[:prometheus_data_collection_settings][:cluster][:kubernetes_services]
monitorKubernetesPods = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods]
+ monitorKubernetesPodsNamespaces = parsedConfig[:prometheus_data_collection_settings][:cluster][:monitor_kubernetes_pods_namespaces]
# Check for the right datattypes to enforce right setting values
if checkForType(interval, String) &&
@@ -75,7 +127,7 @@ def populateSettingValuesFromConfigMap(parsedConfig)
checkForTypeArray(fieldDrop, String) &&
checkForTypeArray(kubernetesServices, String) &&
checkForTypeArray(urls, String) &&
- !monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby
+ (monitorKubernetesPods.nil? || (!monitorKubernetesPods.nil? && (!!monitorKubernetesPods == monitorKubernetesPods))) #Checking for Boolean type, since 'Boolean' is not defined as a type in ruby
puts "config::Successfully passed typecheck for config settings for replicaset"
#if setting is nil assign default values
interval = (interval.nil?) ? @defaultRsInterval : interval
@@ -83,7 +135,7 @@ def populateSettingValuesFromConfigMap(parsedConfig)
fieldDrop = (fieldDrop.nil?) ? @defaultRsFieldDrop : fieldDrop
kubernetesServices = (kubernetesServices.nil?) ? @defaultRsK8sServices : kubernetesServices
urls = (urls.nil?) ? @defaultRsPromUrls : urls
- monitorKubernetesPods = (kubernetesServices.nil?) ? @defaultRsMonitorPods : monitorKubernetesPods
+ monitorKubernetesPods = (monitorKubernetesPods.nil?) ? @defaultRsMonitorPods : monitorKubernetesPods
file_name = "/opt/telegraf-test-rs.conf"
# Copy the telegraf config file to a temp file to run telegraf in test mode with this config
@@ -93,11 +145,24 @@ def populateSettingValuesFromConfigMap(parsedConfig)
#Replace the placeholder config values with values from custom config
text = File.read(file_name)
new_contents = text.gsub("$AZMON_RS_PROM_INTERVAL", interval)
- new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDPASS", ((fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]"))
- new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDDROP", ((fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]"))
+ fieldPassSetting = (fieldPass.length > 0) ? ("[\"" + fieldPass.join("\",\"") + "\"]") : "[]"
+ new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDPASS", fieldPassSetting)
+ fieldDropSetting = (fieldDrop.length > 0) ? ("[\"" + fieldDrop.join("\",\"") + "\"]") : "[]"
+ new_contents = new_contents.gsub("$AZMON_RS_PROM_FIELDDROP", fieldDropSetting)
new_contents = new_contents.gsub("$AZMON_RS_PROM_URLS", ((urls.length > 0) ? ("[\"" + urls.join("\",\"") + "\"]") : "[]"))
new_contents = new_contents.gsub("$AZMON_RS_PROM_K8S_SERVICES", ((kubernetesServices.length > 0) ? ("[\"" + kubernetesServices.join("\",\"") + "\"]") : "[]"))
- new_contents = new_contents.gsub("$AZMON_RS_PROM_MONITOR_PODS", (monitorKubernetesPods ? "true" : "false"))
+
+ # Check to see if monitor_kubernetes_pods is set to true with a valid setting for monitor_kubernetes_namespaces to enable scraping for specific namespaces
+      # Adding nil check here as well since checkForTypeArray returns true even if setting is nil to accommodate for other settings to be able -
+ # - to use defaults in case of nil settings
+ if monitorKubernetesPods && !monitorKubernetesPodsNamespaces.nil? && checkForTypeArray(monitorKubernetesPodsNamespaces, String)
+ new_contents = createPrometheusPluginsWithNamespaceSetting(monitorKubernetesPods, monitorKubernetesPodsNamespaces, new_contents, interval, fieldPassSetting, fieldDropSetting)
+ monitorKubernetesPodsNamespacesLength = monitorKubernetesPodsNamespaces.length
+ else
+ new_contents = replaceDefaultMonitorPodSettings(new_contents, monitorKubernetesPods)
+ monitorKubernetesPodsNamespacesLength = 0
+ end
+
File.open(file_name, "w") { |file| file.puts new_contents }
puts "config::Successfully substituted the placeholders in telegraf conf file for replicaset"
#Set environment variables for telemetry
@@ -110,15 +175,17 @@ def populateSettingValuesFromConfigMap(parsedConfig)
file.write("export TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH=#{kubernetesServices.length}\n")
file.write("export TELEMETRY_RS_PROM_URLS_LENGTH=#{urls.length}\n")
file.write("export TELEMETRY_RS_PROM_MONITOR_PODS=\"#{monitorKubernetesPods}\"\n")
+ file.write("export TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH=\"#{monitorKubernetesPodsNamespacesLength}\"\n")
+
# Close file after writing all environment variables
file.close
puts "config::Successfully created telemetry file for replicaset"
end
else
- puts "config::Typecheck failed for prometheus config settings for replicaset, using defaults"
+ ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for replicaset, using defaults, please use right types for all settings")
end # end of type check condition
rescue => errorStr
- puts "config::error::Exception while parsing config file for prometheus config for replicaset: #{errorStr}, using defaults"
+ ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for replicaset: #{errorStr}, using defaults")
setRsPromDefaults
puts "****************End Prometheus Config Processing********************"
end
@@ -170,16 +237,16 @@ def populateSettingValuesFromConfigMap(parsedConfig)
puts "config::Successfully created telemetry file for daemonset"
end
else
- puts "config::Typecheck failed for prometheus config settings for daemonset, using defaults"
+ ConfigParseErrorLogger.logError("Typecheck failed for prometheus config settings for daemonset, using defaults, please use right types for all settings")
end # end of type check condition
rescue => errorStr
- puts "config::error::Exception while parsing config file for prometheus config for daemonset: #{errorStr}, using defaults"
+ ConfigParseErrorLogger.logError("Exception while parsing config file for prometheus config for daemonset: #{errorStr}, using defaults, please check correctness of configmap")
puts "****************End Prometheus Config Processing********************"
end
end # end of controller type check
end
else
- puts "config::error:: Controller undefined while processing prometheus config, using defaults"
+ ConfigParseErrorLogger.logError("Controller undefined while processing prometheus config, using defaults")
end
end
@@ -192,7 +259,7 @@ def populateSettingValuesFromConfigMap(parsedConfig)
end
else
if (File.file?(@promConfigMapMountPath))
- puts "config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults"
+      ConfigParseErrorLogger.logError("unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported version")
else
puts "config::No configmap mounted for prometheus custom config, using defaults"
end
diff --git a/installer/scripts/tomlparser.rb b/installer/scripts/tomlparser.rb
index 067586629..cd16cbf9b 100644
--- a/installer/scripts/tomlparser.rb
+++ b/installer/scripts/tomlparser.rb
@@ -1,10 +1,9 @@
#!/usr/local/bin/ruby
require_relative "tomlrb"
-require 'json'
+require_relative "ConfigParseErrorLogger"
-@log_settings_config_map_mount_path = "/etc/config/settings/log-data-collection-settings"
-@agent_settings_config_map_mount_path = "/etc/config/settings/agent-settings"
+@configMapMountPath = "/etc/config/settings/log-data-collection-settings"
@configVersion = ""
@configSchemaVersion = ""
# Setting default values which will be used in case they are not set in the configmap or if configmap doesnt exist
@@ -18,21 +17,21 @@
@excludePath = "*.csv2" #some invalid path
# Use parser to parse the configmap toml file to a ruby structure
-def parseConfigMap(path)
+def parseConfigMap
begin
# Check to see if config map is created
- if (File.file?(path))
- puts "config::configmap container-azm-ms-agentconfig for settings mounted, parsing values from #{path}"
- parsedConfig = Tomlrb.load_file(path, symbolize_keys: true)
- puts "config::Successfully parsed mounted config map from #{path}"
+ if (File.file?(@configMapMountPath))
+ puts "config::configmap container-azm-ms-agentconfig for settings mounted, parsing values"
+ parsedConfig = Tomlrb.load_file(@configMapMountPath, symbolize_keys: true)
+ puts "config::Successfully parsed mounted config map"
return parsedConfig
else
- puts "config::configmap container-azm-ms-agentconfig for settings not mounted, using defaults for #{path}"
+ puts "config::configmap container-azm-ms-agentconfig for settings not mounted, using defaults"
@excludePath = "*_kube-system_*.log"
return nil
end
rescue => errorStr
- puts "config::error::Exception while parsing toml config file: #{errorStr}, using defaults"
+ ConfigParseErrorLogger.logError("Exception while parsing config map for log collection/env variable settings: #{errorStr}, using defaults, please check config map for errors")
@excludePath = "*_kube-system_*.log"
return nil
end
@@ -69,7 +68,7 @@ def populateSettingValuesFromConfigMap(parsedConfig)
end
end
rescue => errorStr
- puts "config::error::Exception while reading config settings for stdout log collection - #{errorStr}, using defaults"
+ ConfigParseErrorLogger.logError("Exception while reading config map settings for stdout log collection - #{errorStr}, using defaults, please check config map for errors")
end
#Get stderr log config settings
@@ -106,7 +105,7 @@ def populateSettingValuesFromConfigMap(parsedConfig)
end
end
rescue => errorStr
- puts "config::error:Exception while reading config settings for stderr log collection - #{errorStr}, using defaults"
+ ConfigParseErrorLogger.logError("Exception while reading config map settings for stderr log collection - #{errorStr}, using defaults, please check config map for errors")
end
#Get environment variables log config settings
@@ -116,38 +115,22 @@ def populateSettingValuesFromConfigMap(parsedConfig)
puts "config::Using config map setting for cluster level environment variable collection"
end
rescue => errorStr
- puts "config::error::Exception while reading config settings for cluster level environment variable collection - #{errorStr}, using defaults"
+ ConfigParseErrorLogger.logError("Exception while reading config map settings for cluster level environment variable collection - #{errorStr}, using defaults, please check config map for errors")
end
end
-
- begin
- if !parsedConfig.nil? && !parsedConfig[:agent_settings][:health_model].nil? && !parsedConfig[:agent_settings][:health_model][:enabled].nil?
- @enable_health_model = parsedConfig[:agent_settings][:health_model][:enabled]
- puts "enable_health_model = #{@enable_health_model}"
- end
- rescue => errorStr
- puts "config::error:Exception while reading config settings for health_model enabled setting - #{errorStr}, using defaults"
- @enable_health_model = false
- end
end
@configSchemaVersion = ENV["AZMON_AGENT_CFG_SCHEMA_VERSION"]
puts "****************Start Config Processing********************"
if !@configSchemaVersion.nil? && !@configSchemaVersion.empty? && @configSchemaVersion.strip.casecmp("v1") == 0 #note v1 is the only supported schema version , so hardcoding it
- configMapSettings = {}
-
- #iterate over every *settings file and build a hash of settings
- Dir["/etc/config/settings/*settings"].each{|file|
- puts "Parsing File #{file}"
- settings = parseConfigMap(file)
- configMapSettings = configMapSettings.merge(settings)
- }
-
+ configMapSettings = parseConfigMap
if !configMapSettings.nil?
populateSettingValuesFromConfigMap(configMapSettings)
end
else
- puts "config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults"
+ if (File.file?(@configMapMountPath))
+ ConfigParseErrorLogger.logError("config::unsupported/missing config schema version - '#{@configSchemaVersion}' , using defaults, please use supported schema version")
+ end
@excludePath = "*_kube-system_*.log"
end
@@ -173,13 +156,11 @@ def populateSettingValuesFromConfigMap(parsedConfig)
file.write("export AZMON_STDERR_EXCLUDED_NAMESPACES=#{@stderrExcludeNamespaces}\n")
file.write("export AZMON_CLUSTER_COLLECT_ENV_VAR=#{@collectClusterEnvVariables}\n")
file.write("export AZMON_CLUSTER_LOG_TAIL_EXCLUDE_PATH=#{@excludePath}\n")
- #health_model settings
- file.write("export AZMON_CLUSTER_ENABLE_HEALTH_MODEL=#{@enable_health_model}\n")
# Close file after writing all environment variables
file.close
puts "Both stdout & stderr log collection are turned off for namespaces: '#{@excludePath}' "
puts "****************End Config Processing********************"
else
- puts "config::error::Exception while opening file for writing config environment variables"
+ puts "Exception while opening file for writing config environment variables"
puts "****************End Config Processing********************"
-end
+end
\ No newline at end of file
diff --git a/source/code/go/src/plugins/oms.go b/source/code/go/src/plugins/oms.go
index c5ad307d8..01aab85b4 100644
--- a/source/code/go/src/plugins/oms.go
+++ b/source/code/go/src/plugins/oms.go
@@ -28,6 +28,9 @@ const ContainerLogDataType = "CONTAINER_LOG_BLOB"
// DataType for Insights metric
const InsightsMetricsDataType = "INSIGHTS_METRICS_BLOB"
+// DataType for KubeMonAgentEvent
+const KubeMonAgentEventDataType = "KUBE_MON_AGENT_EVENTS_BLOB"
+
//env varibale which has ResourceId for LA
const ResourceIdEnv = "AKS_RESOURCE_ID"
@@ -46,6 +49,20 @@ const TelegrafTagClusterName = "clusterName"
// clusterId tag
const TelegrafTagClusterID = "clusterId"
+const ConfigErrorEventCategory = "container.azm.ms/configmap"
+
+const PromScrapingErrorEventCategory = "container.azm.ms/promscraping"
+
+const NoErrorEventCategory = "container.azm.ms/noerror"
+
+const KubeMonAgentEventError = "Error"
+
+const KubeMonAgentEventWarning = "Warning"
+
+const KubeMonAgentEventInfo = "Info"
+
+const KubeMonAgentEventsFlushedEvent = "KubeMonAgentEventsFlushed"
+
// ContainerLogPluginConfFilePath --> config file path for container log plugin
const DaemonSetContainerLogPluginConfFilePath = "/etc/opt/microsoft/docker-cimprov/out_oms.conf"
const ReplicaSetContainerLogPluginConfFilePath = "/etc/opt/microsoft/docker-cimprov/out_oms.conf"
@@ -54,6 +71,8 @@ const ReplicaSetContainerLogPluginConfFilePath = "/etc/opt/microsoft/docker-cimp
const IPName = "Containers"
const defaultContainerInventoryRefreshInterval = 60
+const kubeMonAgentConfigEventFlushInterval = 60
+
var (
// PluginConfiguration the plugins configuration
PluginConfiguration map[string]string
@@ -71,6 +90,8 @@ var (
ResourceCentric bool
//ResourceName
ResourceName string
+ //KubeMonAgentEvents skip first flush
+ skipKubeMonEventsFlush bool
)
var (
@@ -88,11 +109,19 @@ var (
ContainerLogTelemetryMutex = &sync.Mutex{}
// ClientSet for querying KubeAPIs
ClientSet *kubernetes.Clientset
+ // Config error hash
+ ConfigErrorEvent map[string]KubeMonAgentEventTags
+ // Prometheus scraping error hash
+ PromScrapeErrorEvent map[string]KubeMonAgentEventTags
+ // EventHashUpdateMutex read and write mutex access to the event hash
+ EventHashUpdateMutex = &sync.Mutex{}
)
var (
// ContainerImageNameRefreshTicker updates the container image and names periodically
ContainerImageNameRefreshTicker *time.Ticker
+ // KubeMonAgentConfigEventsSendTicker to send config events every hour
+ KubeMonAgentConfigEventsSendTicker *time.Ticker
)
var (
@@ -142,6 +171,41 @@ type ContainerLogBlob struct {
DataItems []DataItem `json:"DataItems"`
}
+// Config Error message to be sent to Log Analytics
+type laKubeMonAgentEvents struct {
+ Computer string `json:"Computer"`
+ CollectionTime string `json:"CollectionTime"` //mapped to TimeGenerated
+ Category string `json:"Category"`
+ Level string `json:"Level"`
+ ClusterId string `json:"ClusterId"`
+ ClusterName string `json:"ClusterName"`
+ Message string `json:"Message"`
+ Tags string `json:"Tags"`
+}
+
+type KubeMonAgentEventTags struct {
+ PodName string
+ ContainerId string
+ FirstOccurrence string
+ LastOccurrence string
+ Count int
+}
+
+type KubeMonAgentEventBlob struct {
+ DataType string `json:"DataType"`
+ IPName string `json:"IPName"`
+ DataItems []laKubeMonAgentEvents `json:"DataItems"`
+}
+
+// KubeMonAgentEventType to be used as enum
+type KubeMonAgentEventType int
+
+const (
+ // KubeMonAgentEventType to be used as enum for ConfigError and ScrapingError
+ ConfigError KubeMonAgentEventType = iota
+ PromScrapingError
+)
+
func createLogger() *log.Logger {
var logfile *os.File
path := "/var/opt/microsoft/docker-cimprov/log/fluent-bit-out-oms-runtime.log"
@@ -195,7 +259,14 @@ func updateContainerImageNameMaps() {
}
for _, pod := range pods.Items {
- for _, status := range pod.Status.ContainerStatuses {
+ podContainerStatuses := pod.Status.ContainerStatuses
+
+ // Doing this to include init container logs as well
+ podInitContainerStatuses := pod.Status.InitContainerStatuses
+ if (podInitContainerStatuses != nil) && (len(podInitContainerStatuses) > 0) {
+ podContainerStatuses = append(podContainerStatuses, podInitContainerStatuses...)
+ }
+ for _, status := range podContainerStatuses {
lastSlashIndex := strings.LastIndex(status.ContainerID, "/")
containerID := status.ContainerID[lastSlashIndex+1 : len(status.ContainerID)]
image := status.Image
@@ -262,6 +333,223 @@ func convert(in interface{}) (float64, bool) {
}
}
+// PostConfigErrorstoLA sends config/prometheus scraping error log lines to LA
+func populateKubeMonAgentEventHash(record map[interface{}]interface{}, errType KubeMonAgentEventType) {
+ var logRecordString = ToString(record["log"])
+ var eventTimeStamp = ToString(record["time"])
+ containerID, _, podName := GetContainerIDK8sNamespacePodNameFromFileName(ToString(record["filepath"]))
+
+ Log("Locked EventHashUpdateMutex for updating hash \n ")
+ EventHashUpdateMutex.Lock()
+ switch errType {
+ case ConfigError:
+ // Doing this since the error logger library is adding quotes around the string and a newline to the end because
+ // we are converting string to json to log lines in different lines as one record
+ logRecordString = strings.TrimSuffix(logRecordString, "\n")
+ logRecordString = logRecordString[1 : len(logRecordString)-1]
+
+ if val, ok := ConfigErrorEvent[logRecordString]; ok {
+ Log("In config error existing hash update\n")
+ eventCount := val.Count
+ eventFirstOccurrence := val.FirstOccurrence
+
+ ConfigErrorEvent[logRecordString] = KubeMonAgentEventTags{
+ PodName: podName,
+ ContainerId: containerID,
+ FirstOccurrence: eventFirstOccurrence,
+ LastOccurrence: eventTimeStamp,
+ Count: eventCount + 1,
+ }
+ } else {
+ ConfigErrorEvent[logRecordString] = KubeMonAgentEventTags{
+ PodName: podName,
+ ContainerId: containerID,
+ FirstOccurrence: eventTimeStamp,
+ LastOccurrence: eventTimeStamp,
+ Count: 1,
+ }
+ }
+
+ case PromScrapingError:
+ // Splitting this based on the string 'E! [inputs.prometheus]: ' since the log entry has timestamp and we want to remove that before building the hash
+ var scrapingSplitString = strings.Split(logRecordString, "E! [inputs.prometheus]: ")
+ if scrapingSplitString != nil && len(scrapingSplitString) == 2 {
+ var splitString = scrapingSplitString[1]
+ // Trimming the newline character at the end since this is being added as the key
+ splitString = strings.TrimSuffix(splitString, "\n")
+ if splitString != "" {
+ if val, ok := PromScrapeErrorEvent[splitString]; ok {
+ Log("In config error existing hash update\n")
+ eventCount := val.Count
+ eventFirstOccurrence := val.FirstOccurrence
+
+ PromScrapeErrorEvent[splitString] = KubeMonAgentEventTags{
+ PodName: podName,
+ ContainerId: containerID,
+ FirstOccurrence: eventFirstOccurrence,
+ LastOccurrence: eventTimeStamp,
+ Count: eventCount + 1,
+ }
+ } else {
+ PromScrapeErrorEvent[splitString] = KubeMonAgentEventTags{
+ PodName: podName,
+ ContainerId: containerID,
+ FirstOccurrence: eventTimeStamp,
+ LastOccurrence: eventTimeStamp,
+ Count: 1,
+ }
+ }
+ }
+ }
+ }
+ EventHashUpdateMutex.Unlock()
+ Log("Unlocked EventHashUpdateMutex after updating hash \n ")
+}
+
+// Function to get config error log records after iterating through the two hashes
+func flushKubeMonAgentEventRecords() {
+ for ; true; <-KubeMonAgentConfigEventsSendTicker.C {
+ if skipKubeMonEventsFlush != true {
+ Log("In flushConfigErrorRecords\n")
+ start := time.Now()
+ var resp *http.Response
+ var postError error
+ var elapsed time.Duration
+ var laKubeMonAgentEventsRecords []laKubeMonAgentEvents
+ telemetryDimensions := make(map[string]string)
+
+ telemetryDimensions["ConfigErrorEventCount"] = strconv.Itoa(len(ConfigErrorEvent))
+ telemetryDimensions["PromScrapeErrorEventCount"] = strconv.Itoa(len(PromScrapeErrorEvent))
+
+ if (len(ConfigErrorEvent) > 0) || (len(PromScrapeErrorEvent) > 0) {
+ EventHashUpdateMutex.Lock()
+ Log("Locked EventHashUpdateMutex for reading hashes\n")
+ for k, v := range ConfigErrorEvent {
+ tagJson, err := json.Marshal(v)
+
+ if err != nil {
+ message := fmt.Sprintf("Error while Marshalling config error event tags: %s", err.Error())
+ Log(message)
+ SendException(message)
+ } else {
+ laKubeMonAgentEventsRecord := laKubeMonAgentEvents{
+ Computer: Computer,
+ CollectionTime: start.Format(time.RFC3339),
+ Category: ConfigErrorEventCategory,
+ Level: KubeMonAgentEventError,
+ ClusterId: ResourceID,
+ ClusterName: ResourceName,
+ Message: k,
+ Tags: fmt.Sprintf("%s", tagJson),
+ }
+ laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord)
+ }
+ }
+
+ for k, v := range PromScrapeErrorEvent {
+ tagJson, err := json.Marshal(v)
+ if err != nil {
+ message := fmt.Sprintf("Error while Marshalling prom scrape error event tags: %s", err.Error())
+ Log(message)
+ SendException(message)
+ } else {
+ laKubeMonAgentEventsRecord := laKubeMonAgentEvents{
+ Computer: Computer,
+ CollectionTime: start.Format(time.RFC3339),
+ Category: PromScrapingErrorEventCategory,
+ Level: KubeMonAgentEventWarning,
+ ClusterId: ResourceID,
+ ClusterName: ResourceName,
+ Message: k,
+ Tags: fmt.Sprintf("%s", tagJson),
+ }
+ laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord)
+ }
+ }
+
+ //Clearing out the prometheus scrape hash so that it can be rebuilt with the errors in the next hour
+ for k := range PromScrapeErrorEvent {
+ delete(PromScrapeErrorEvent, k)
+ }
+ Log("PromScrapeErrorEvent cache cleared\n")
+ EventHashUpdateMutex.Unlock()
+ Log("Unlocked EventHashUpdateMutex for reading hashes\n")
+ } else {
+ //Sending a record in case there are no errors to be able to differentiate between no data vs no errors
+ tagsValue := KubeMonAgentEventTags{}
+
+ tagJson, err := json.Marshal(tagsValue)
+ if err != nil {
+ message := fmt.Sprintf("Error while Marshalling no error tags: %s", err.Error())
+ Log(message)
+ SendException(message)
+ } else {
+ laKubeMonAgentEventsRecord := laKubeMonAgentEvents{
+ Computer: Computer,
+ CollectionTime: start.Format(time.RFC3339),
+ Category: NoErrorEventCategory,
+ Level: KubeMonAgentEventInfo,
+ ClusterId: ResourceID,
+ ClusterName: ResourceName,
+ Message: "No errors",
+ Tags: fmt.Sprintf("%s", tagJson),
+ }
+ laKubeMonAgentEventsRecords = append(laKubeMonAgentEventsRecords, laKubeMonAgentEventsRecord)
+ }
+ }
+
+ if len(laKubeMonAgentEventsRecords) > 0 {
+ kubeMonAgentEventEntry := KubeMonAgentEventBlob{
+ DataType: KubeMonAgentEventDataType,
+ IPName: IPName,
+ DataItems: laKubeMonAgentEventsRecords}
+
+ marshalled, err := json.Marshal(kubeMonAgentEventEntry)
+
+ if err != nil {
+ message := fmt.Sprintf("Error while marshalling kubemonagentevent entry: %s", err.Error())
+ Log(message)
+ SendException(message)
+ } else {
+ req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(marshalled))
+ req.Header.Set("Content-Type", "application/json")
+ //expensive to do string len for every request, so use a flag
+ if ResourceCentric == true {
+ req.Header.Set("x-ms-AzureResourceId", ResourceID)
+ }
+
+ resp, postError = HTTPClient.Do(req)
+ elapsed = time.Since(start)
+
+ if postError != nil {
+ message := fmt.Sprintf("Error when sending kubemonagentevent request %s \n", err.Error())
+ Log(message)
+ Log("Failed to flush %d records after %s", len(laKubeMonAgentEventsRecords), elapsed)
+ } else if resp == nil || resp.StatusCode != 200 {
+ if resp != nil {
+ Log("Status %s Status Code %d", resp.Status, resp.StatusCode)
+ }
+ Log("Failed to flush %d records after %s", len(laKubeMonAgentEventsRecords), elapsed)
+ } else {
+ numRecords := len(laKubeMonAgentEventsRecords)
+ Log("Successfully flushed %d records in %s", numRecords, elapsed)
+
+ // Send telemetry to AppInsights resource
+ SendEvent(KubeMonAgentEventsFlushedEvent, telemetryDimensions)
+
+ }
+ if resp != nil && resp.Body != nil {
+ defer resp.Body.Close()
+ }
+ }
+ }
+ } else {
+ // Setting this to false to allow for subsequent flushes after the first hour
+ skipKubeMonEventsFlush = false
+ }
+ }
+}
+
//Translates telegraf time series to one or more Azure loganalytics metric(s)
func translateTelegrafMetrics(m map[interface{}]interface{}) ([]*laTelegrafMetric, error) {
@@ -431,7 +719,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int {
DataUpdateMutex.Unlock()
for _, record := range tailPluginRecords {
- containerID, k8sNamespace := GetContainerIDK8sNamespaceFromFileName(ToString(record["filepath"]))
+ containerID, k8sNamespace, _ := GetContainerIDK8sNamespacePodNameFromFileName(ToString(record["filepath"]))
logEntrySource := ToString(record["stream"])
if strings.EqualFold(logEntrySource, "stdout") {
@@ -475,16 +763,18 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int {
FlushedRecordsSize += float64(len(stringMap["LogEntry"]))
dataItems = append(dataItems, dataItem)
- loggedTime, e := time.Parse(time.RFC3339, dataItem.LogEntryTimeStamp)
- if e != nil {
- message := fmt.Sprintf("Error while converting LogEntryTimeStamp for telemetry purposes: %s", e.Error())
- Log(message)
- SendException(message)
- } else {
- ltncy := float64(start.Sub(loggedTime) / time.Millisecond)
- if ltncy >= maxLatency {
- maxLatency = ltncy
- maxLatencyContainer = dataItem.Name + "=" + dataItem.ID
+ if dataItem.LogEntryTimeStamp != "" {
+ loggedTime, e := time.Parse(time.RFC3339, dataItem.LogEntryTimeStamp)
+ if e != nil {
+ message := fmt.Sprintf("Error while converting LogEntryTimeStamp for telemetry purposes: %s", e.Error())
+ Log(message)
+ SendException(message)
+ } else {
+ ltncy := float64(start.Sub(loggedTime) / time.Millisecond)
+ if ltncy >= maxLatency {
+ maxLatency = ltncy
+ maxLatencyContainer = dataItem.Name + "=" + dataItem.ID
+ }
}
}
}
@@ -502,6 +792,7 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int {
SendException(message)
return output.FLB_OK
}
+
req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(marshalled))
req.Header.Set("Content-Type", "application/json")
//expensive to do string len for every request, so use a flag
@@ -552,11 +843,12 @@ func containsKey(currentMap map[string]bool, key string) bool {
return c
}
-// GetContainerIDK8sNamespaceFromFileName Gets the container ID From the file Name
+// GetContainerIDK8sNamespacePodNameFromFileName Gets the container ID, k8s namespace and pod name From the file Name
// sample filename kube-proxy-dgcx7_kube-system_kube-proxy-8df7e49e9028b60b5b0d0547f409c455a9567946cf763267b7e6fa053ab8c182.log
-func GetContainerIDK8sNamespaceFromFileName(filename string) (string, string) {
+func GetContainerIDK8sNamespacePodNameFromFileName(filename string) (string, string, string) {
id := ""
ns := ""
+ podName := ""
start := strings.LastIndex(filename, "-")
end := strings.LastIndex(filename, ".")
@@ -576,7 +868,16 @@ func GetContainerIDK8sNamespaceFromFileName(filename string) (string, string) {
ns = filename[start+1 : end]
}
- return id, ns
+ start = strings.Index(filename, "/containers/")
+ end = strings.Index(filename, "_")
+
+ if start >= end || start == -1 || end == -1 {
+ podName = ""
+ } else {
+ podName = filename[(start + len("/containers/")):end]
+ }
+
+ return id, ns, podName
}
// InitializePlugin reads and populates plugin configuration
@@ -586,6 +887,12 @@ func InitializePlugin(pluginConfPath string, agentVersion string) {
StderrIgnoreNsSet = make(map[string]bool)
ImageIDMap = make(map[string]string)
NameIDMap = make(map[string]string)
+ // Keeping the two error hashes separate since we need to keep the config error hash for the lifetime of the container
+ // whereas the prometheus scrape error hash needs to be refreshed every hour
+ ConfigErrorEvent = make(map[string]KubeMonAgentEventTags)
+ PromScrapeErrorEvent = make(map[string]KubeMonAgentEventTags)
+ // Initilizing this to true to skip the first kubemonagentevent flush since the errors are not populated at this time
+ skipKubeMonEventsFlush = true
pluginConfig, err := ReadConfiguration(pluginConfPath)
if err != nil {
@@ -640,6 +947,9 @@ func InitializePlugin(pluginConfPath string, agentVersion string) {
Log("containerInventoryRefreshInterval = %d \n", containerInventoryRefreshInterval)
ContainerImageNameRefreshTicker = time.NewTicker(time.Second * time.Duration(containerInventoryRefreshInterval))
+ Log("kubeMonAgentConfigEventFlushInterval = %d \n", kubeMonAgentConfigEventFlushInterval)
+ KubeMonAgentConfigEventsSendTicker = time.NewTicker(time.Minute * time.Duration(kubeMonAgentConfigEventFlushInterval))
+
// Populate Computer field
containerHostName, err := ioutil.ReadFile(pluginConfig["container_host_file_path"])
if err != nil {
@@ -682,7 +992,11 @@ func InitializePlugin(pluginConfPath string, agentVersion string) {
populateExcludedStdoutNamespaces()
populateExcludedStderrNamespaces()
go updateContainerImageNameMaps()
+
+ // Flush config error records every hour
+ go flushKubeMonAgentEventRecords()
} else {
Log("Running in replicaset. Disabling container enrichment caching & updates \n")
}
+
}
diff --git a/source/code/go/src/plugins/out_oms.go b/source/code/go/src/plugins/out_oms.go
index e9e7124b7..1f1915798 100644
--- a/source/code/go/src/plugins/out_oms.go
+++ b/source/code/go/src/plugins/out_oms.go
@@ -1,14 +1,14 @@
package main
import (
- "github.com/fluent/fluent-bit-go/output"
"github.com/Microsoft/ApplicationInsights-Go/appinsights"
+ "github.com/fluent/fluent-bit-go/output"
)
import (
"C"
+ "os"
"strings"
"unsafe"
- "os"
)
//export FLBPluginRegister
@@ -61,6 +61,7 @@ func FLBPluginFlush(data unsafe.Pointer, length C.int, tag *C.char) int {
incomingTag := strings.ToLower(C.GoString(tag))
if strings.Contains(incomingTag, "oms.container.log.flbplugin") {
+ // This will also include populating cache to be sent as for config events
return PushToAppInsightsTraces(records, appinsights.Information, incomingTag)
} else if strings.Contains(incomingTag, "oms.container.perf.telegraf") {
return PostTelegrafMetricsToLA(records)
diff --git a/source/code/go/src/plugins/telemetry.go b/source/code/go/src/plugins/telemetry.go
index 5fc0fa843..d5675187f 100644
--- a/source/code/go/src/plugins/telemetry.go
+++ b/source/code/go/src/plugins/telemetry.go
@@ -42,6 +42,7 @@ const (
envAKSResourceID = "AKS_RESOURCE_ID"
envACSResourceName = "ACS_RESOURCE_NAME"
envAppInsightsAuth = "APPLICATIONINSIGHTS_AUTH"
+ envAppInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT"
metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec"
metricNameAvgLogGenerationRate = "ContainerLogsGeneratedPerSec"
metricNameLogSize = "ContainerLogsSize"
@@ -141,7 +142,15 @@ func InitializeTelemetryClient(agentVersion string) (int, error) {
return -1, err
}
- TelemetryClient = appinsights.NewTelemetryClient(string(decIkey))
+ appInsightsEndpoint := os.Getenv(envAppInsightsEndpoint)
+ telemetryClientConfig := appinsights.NewTelemetryConfiguration(string(decIkey))
+ // endpoint override required only for sovereign clouds
+ if appInsightsEndpoint != "" {
+ Log("Overriding the default AppInsights EndpointUrl with %s", appInsightsEndpoint)
+ telemetryClientConfig.EndpointUrl = envAppInsightsEndpoint
+ }
+ TelemetryClient = appinsights.NewTelemetryClientFromConfig(telemetryClientConfig)
+
telemetryOffSwitch := os.Getenv("DISABLE_TELEMETRY")
if strings.Compare(strings.ToLower(telemetryOffSwitch), "true") == 0 {
Log("Appinsights telemetry is disabled \n")
@@ -189,7 +198,15 @@ func InitializeTelemetryClient(agentVersion string) (int, error) {
func PushToAppInsightsTraces(records []map[interface{}]interface{}, severityLevel contracts.SeverityLevel, tag string) int {
var logLines []string
for _, record := range records {
- logLines = append(logLines, ToString(record["log"]))
+ // If record contains config error or prometheus scraping errors send it to KubeMonAgentEvents table
+ var logEntry = ToString(record["log"])
+ if strings.Contains(logEntry, "config::error") {
+ populateKubeMonAgentEventHash(record, ConfigError)
+ } else if strings.Contains(logEntry, "E! [inputs.prometheus]") {
+ populateKubeMonAgentEventHash(record, PromScrapingError)
+ } else {
+ logLines = append(logLines, logEntry)
+ }
}
traceEntry := strings.Join(logLines, "\n")
diff --git a/source/code/plugin/ApplicationInsightsUtility.rb b/source/code/plugin/ApplicationInsightsUtility.rb
index 5dc2bfab8..85b424e69 100644
--- a/source/code/plugin/ApplicationInsightsUtility.rb
+++ b/source/code/plugin/ApplicationInsightsUtility.rb
@@ -18,6 +18,7 @@ class ApplicationInsightsUtility
@@EnvAksRegion = "AKS_REGION"
@@EnvAgentVersion = "AGENT_VERSION"
@@EnvApplicationInsightsKey = "APPLICATIONINSIGHTS_AUTH"
+ @@EnvApplicationInsightsEndpoint = "APPLICATIONINSIGHTS_ENDPOINT"
@@EnvControllerType = "CONTROLLER_TYPE"
@@CustomProperties = {}
@@ -62,6 +63,8 @@ def initializeUtility()
@@CustomProperties["AgentVersion"] = ENV[@@EnvAgentVersion]
@@CustomProperties["ControllerType"] = ENV[@@EnvControllerType]
encodedAppInsightsKey = ENV[@@EnvApplicationInsightsKey]
+ appInsightsEndpoint = ENV[@@EnvApplicationInsightsEndpoint]
+ @@CustomProperties["WorkspaceCloud"] = getWorkspaceCloud
#Check if telemetry is turned off
telemetryOffSwitch = ENV["DISABLE_TELEMETRY"]
@@ -70,7 +73,16 @@ def initializeUtility()
@@Tc = ApplicationInsights::TelemetryClient.new
elsif !encodedAppInsightsKey.nil?
decodedAppInsightsKey = Base64.decode64(encodedAppInsightsKey)
- @@Tc = ApplicationInsights::TelemetryClient.new decodedAppInsightsKey
+ #override ai endpoint if its available otherwise use default.
+ if appInsightsEndpoint && !appInsightsEndpoint.nil? && !appInsightsEndpoint.empty?
+ $log.info("AppInsightsUtility: Telemetry client uses overrided endpoint url : #{appInsightsEndpoint}")
+ telemetrySynchronousSender = ApplicationInsights::Channel::SynchronousSender.new appInsightsEndpoint
+ telemetrySynchronousQueue = ApplicationInsights::Channel::SynchronousQueue.new(telemetrySynchronousSender)
+ telemetryChannel = ApplicationInsights::Channel::TelemetryChannel.new nil, telemetrySynchronousQueue
+ @@Tc = ApplicationInsights::TelemetryClient.new decodedAppInsightsKey, telemetryChannel
+ else
+ @@Tc = ApplicationInsights::TelemetryClient.new decodedAppInsightsKey
+ end
end
rescue => errorStr
$log.warn("Exception in AppInsightsUtility: initilizeUtility - error: #{errorStr}")
@@ -219,5 +231,32 @@ def getWorkspaceId()
$log.warn("Exception in AppInsightsUtility: getWorkspaceId - error: #{errorStr}")
end
end
+
+ def getWorkspaceCloud()
+ begin
+ adminConf = {}
+ confFile = File.open(@OmsAdminFilePath, "r")
+ confFile.each_line do |line|
+ splitStrings = line.split("=")
+ adminConf[splitStrings[0]] = splitStrings[1]
+ end
+ workspaceDomain = adminConf["URL_TLD"].strip
+ workspaceCloud = "AzureCloud"
+ if workspaceDomain.casecmp("opinsights.azure.com") == 0
+ workspaceCloud = "AzureCloud"
+ elsif workspaceDomain.casecmp("opinsights.azure.cn") == 0
+ workspaceCloud = "AzureChinaCloud"
+ elsif workspaceDomain.casecmp("opinsights.azure.us") == 0
+ workspaceCloud = "AzureUSGovernment"
+ elsif workspaceDomain.casecmp("opinsights.azure.de") == 0
+ workspaceCloud = "AzureGermanCloud"
+ else
+ workspaceCloud = "Unknown"
+ end
+ return workspaceCloud
+ rescue => errorStr
+ $log.warn("Exception in AppInsightsUtility: getWorkspaceCloud - error: #{errorStr}")
+ end
+ end
end
end
diff --git a/source/code/plugin/KubernetesApiClient.rb b/source/code/plugin/KubernetesApiClient.rb
index 48b25bf14..be1a51791 100644
--- a/source/code/plugin/KubernetesApiClient.rb
+++ b/source/code/plugin/KubernetesApiClient.rb
@@ -356,9 +356,19 @@ def getContainerResourceRequestsAndLimits(metricJSON, metricCategory, metricName
else
podUid = pod["metadata"]["uid"]
end
- if (!pod["spec"]["containers"].nil? && !pod["spec"]["nodeName"].nil?)
+
+ podContainers = []
+ if !pod["spec"]["containers"].nil? && !pod["spec"]["containers"].empty?
+ podContainers = podContainers + pod["spec"]["containers"]
+ end
+ # Adding init containers to the record list as well.
+ if !pod["spec"]["initContainers"].nil? && !pod["spec"]["initContainers"].empty?
+ podContainers = podContainers + pod["spec"]["initContainers"]
+ end
+
+ if (!podContainers.nil? && !podContainers.empty? && !pod["spec"]["nodeName"].nil?)
nodeName = pod["spec"]["nodeName"]
- pod["spec"]["containers"].each do |container|
+ podContainers.each do |container|
containerName = container["name"]
metricTime = Time.now.utc.iso8601 #2018-01-30T19:36:14Z
if (!container["resources"].nil? && !container["resources"].empty? && !container["resources"][metricCategory].nil? && !container["resources"][metricCategory][metricNameToCollect].nil?)
diff --git a/source/code/plugin/filter_cadvisor_health_container.rb b/source/code/plugin/filter_cadvisor_health_container.rb
index 4090092a9..2eccd125f 100644
--- a/source/code/plugin/filter_cadvisor_health_container.rb
+++ b/source/code/plugin/filter_cadvisor_health_container.rb
@@ -5,66 +5,57 @@ module Fluent
require 'logger'
require 'json'
require_relative 'oms_common'
- require_relative 'HealthMonitorUtils'
- require_relative 'HealthMonitorState'
require_relative "ApplicationInsightsUtility"
+ Dir[File.join(__dir__, './health', '*.rb')].each { |file| require file }
class CAdvisor2ContainerHealthFilter < Filter
+ include HealthModel
Fluent::Plugin.register_filter('filter_cadvisor_health_container', self)
config_param :log_path, :string, :default => '/var/opt/microsoft/docker-cimprov/log/health_monitors.log'
config_param :metrics_to_collect, :string, :default => 'cpuUsageNanoCores,memoryRssBytes'
config_param :container_resource_refresh_interval_minutes, :integer, :default => 5
- @@object_name_k8s_node = 'K8SNode'
@@object_name_k8s_container = 'K8SContainer'
-
@@counter_name_cpu = 'cpuusagenanocores'
@@counter_name_memory_rss = 'memoryrssbytes'
- @@health_monitor_config = {}
-
- @@hostName = (OMS::Common.get_hostname)
- @@clusterName = KubernetesApiClient.getClusterName
- @@clusterId = KubernetesApiClient.getClusterId
- @@clusterRegion = KubernetesApiClient.getClusterRegion
- @@cluster_health_model_enabled = HealthMonitorUtils.is_cluster_health_model_enabled
-
def initialize
- super
- @cpu_capacity = 0.0
- @memory_capacity = 0.0
- @last_resource_refresh = DateTime.now.to_time.to_i
- @metrics_to_collect_hash = {}
+ begin
+ super
+ @metrics_to_collect_hash = {}
+ @formatter = HealthContainerCpuMemoryRecordFormatter.new
+ rescue => e
+ @log.info "Error in filter_cadvisor_health_container initialize #{e.backtrace}"
+ ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"})
+ end
end
def configure(conf)
- super
- @log = HealthMonitorUtils.getLogHandle
- @log.debug {'Starting filter_cadvisor2health plugin'}
+ begin
+ super
+ @log = HealthMonitorUtils.get_log_handle
+ @log.debug {'Starting filter_cadvisor2health plugin'}
+ rescue => e
+ @log.info "Error in filter_cadvisor_health_container configure #{e.backtrace}"
+ ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"})
+ end
end
def start
- super
- @metrics_to_collect_hash = HealthMonitorUtils.build_metrics_hash(@metrics_to_collect)
- @log.debug "Calling ensure_cpu_memory_capacity_set cpu_capacity #{@cpu_capacity} memory_capacity #{@memory_capacity}"
- node_capacity = HealthMonitorUtils.ensure_cpu_memory_capacity_set(@@hm_log, @cpu_capacity, @memory_capacity, @@hostName)
- @cpu_capacity = node_capacity[0]
- @memory_capacity = node_capacity[1]
- @log.info "CPU Capacity #{@cpu_capacity} Memory Capacity #{@memory_capacity}"
- #HealthMonitorUtils.refresh_kubernetes_api_data(@log, @@hostName)
- @@health_monitor_config = HealthMonitorUtils.getHealthMonitorConfig
- ApplicationInsightsUtility.sendCustomEvent("filter_cadvisor_health Plugin Start", {})
+ begin
+ super
+ @metrics_to_collect_hash = HealthMonitorUtils.build_metrics_hash(@metrics_to_collect)
+ ApplicationInsightsUtility.sendCustomEvent("filter_cadvisor_health_container Plugin Start", {})
+ rescue => e
+ @log.info "Error in filter_cadvisor_health_container start #{e.backtrace}"
+ ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"})
+ end
end
def filter_stream(tag, es)
- if !@@cluster_health_model_enabled
- @log.info "Cluster Health Model disabled in filter_cadvisor_health_container"
- return []
- end
new_es = MultiEventStream.new
- #HealthMonitorUtils.refresh_kubernetes_api_data(@log, @hostName)
records_count = 0
es.each { |time, record|
begin
@@ -74,10 +65,11 @@ def filter_stream(tag, es)
records_count += 1
end
rescue => e
- router.emit_error_event(tag, time, record, e)
+ @log.info "Error in filter_cadvisor_health_container filter_stream #{e.backtrace}"
+ ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"})
end
}
- @log.debug "Filter Records Count #{records_count}"
+ @log.debug "filter_cadvisor_health_container Records Count #{records_count}"
new_es
end
@@ -88,176 +80,19 @@ def filter(tag, time, record)
end
object_name = record['DataItems'][0]['ObjectName']
counter_name = record['DataItems'][0]['Collections'][0]['CounterName'].downcase
- if @metrics_to_collect_hash.key?(counter_name.downcase)
- metric_value = record['DataItems'][0]['Collections'][0]['Value']
- case object_name
- when @@object_name_k8s_container
- case counter_name.downcase
- when @@counter_name_cpu
- # @log.debug "Object Name #{object_name}"
- # @log.debug "Counter Name #{counter_name}"
- # @log.debug "Metric Value #{metric_value}"
- #return process_container_cpu_record(record, metric_value)
- when @@counter_name_memory_rss
- #return process_container_memory_record(record, metric_value)
- end
- when @@object_name_k8s_node
- case counter_name.downcase
- when @@counter_name_cpu
- #process_node_cpu_record(record, metric_value)
- when @@counter_name_memory_rss
- #process_node_memory_record(record, metric_value)
- end
+ if @metrics_to_collect_hash.key?(counter_name)
+ if object_name == @@object_name_k8s_container
+ return @formatter.get_record_from_cadvisor_record(record)
end
end
+ return nil
rescue => e
@log.debug "Error in filter #{e}"
@log.debug "record #{record}"
@log.debug "backtrace #{e.backtrace}"
- ApplicationInsightsUtility.sendExceptionTelemetry(e)
- return nil
- end
- end
-
- def process_container_cpu_record(record, metric_value)
- monitor_id = HealthMonitorConstants::WORKLOAD_CONTAINER_CPU_PERCENTAGE_MONITOR_ID
- @log.debug "processing container cpu record"
- if record.nil?
- return nil
- else
- instance_name = record['DataItems'][0]['InstanceName']
- key = HealthMonitorUtils.getContainerKeyFromInstanceName(instance_name)
- container_metadata = HealthMonitorUtils.getContainerMetadata(key)
- if !container_metadata.nil?
- cpu_limit = container_metadata['cpuLimit']
- end
-
- if cpu_limit.to_s.empty?
- #@log.info "CPU Limit is nil"
- cpu_limit = @cpu_capacity
- end
-
- #@log.info "cpu limit #{cpu_limit}"
-
- percent = (metric_value.to_f/cpu_limit*100).round(2)
- #@log.debug "Container #{key} | Percentage of CPU limit: #{percent}"
- state = HealthMonitorState.computeHealthMonitorState(@log, monitor_id, percent, @@health_monitor_config[HealthMonitorConstants::WORKLOAD_CONTAINER_CPU_PERCENTAGE_MONITOR_ID])
- #@log.debug "Computed State : #{state}"
- timestamp = record['DataItems'][0]['Timestamp']
- health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"cpuUsageMillicores" => metric_value/1000000.to_f, "cpuUtilizationPercentage" => percent}}
- #@log.info health_monitor_record
-
- monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(@log, monitor_id, [@@clusterId, @@hostName, key])
- #@log.info "Monitor Instance Id: #{monitor_instance_id}"
- temp = record.nil? ? "Nil" : record["MonitorInstanceId"]
- @log.info "Processed Container CPU #{temp}"
- return record
- end
- return nil
- end
-
- def process_container_memory_record(record, metric_value)
- monitor_id = HealthMonitorConstants::WORKLOAD_CONTAINER_MEMORY_PERCENTAGE_MONITOR_ID
- #@log.debug "processing container memory record"
- if record.nil?
+ ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"})
return nil
- else
- instance_name = record['DataItems'][0]['InstanceName']
- key = HealthMonitorUtils.getContainerKeyFromInstanceName(instance_name)
- container_metadata = HealthMonitorUtils.getContainerMetadata(key)
- if !container_metadata.nil?
- memory_limit = container_metadata['memoryLimit']
- end
-
- if memory_limit.to_s.empty?
- #@log.info "Memory Limit is nil"
- memory_limit = @memory_capacity
- end
-
- #@log.info "memory limit #{memory_limit}"
-
- percent = (metric_value.to_f/memory_limit*100).round(2)
- #@log.debug "Container #{key} | Percentage of Memory limit: #{percent}"
- state = HealthMonitorState.computeHealthMonitorState(@log, monitor_id, percent, @@health_monitor_config[HealthMonitorConstants::WORKLOAD_CONTAINER_MEMORY_PERCENTAGE_MONITOR_ID])
- #@log.debug "Computed State : #{state}"
- timestamp = record['DataItems'][0]['Timestamp']
- health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"memoryRssBytes" => metric_value.to_f, "memoryUtilizationPercentage" => percent}}
- #@log.info health_monitor_record
-
- monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(@log, monitor_id, [@@clusterId, @@hostName, key])
- #@log.info "Monitor Instance Id: #{monitor_instance_id}"
- temp = record.nil? ? "Nil" : record["MonitorInstanceId"]
- @log.info "Processed Container Memory #{temp}"
- return record
- end
- return nil
- end
-
- def process_node_cpu_record(record, metric_value)
- monitor_id = HealthMonitorConstants::NODE_CPU_MONITOR_ID
- #@log.debug "processing node cpu record"
- if record.nil?
- return nil
- else
- instance_name = record['DataItems'][0]['InstanceName']
- #@log.info "CPU capacity #{@cpu_capacity}"
-
- percent = (metric_value.to_f/@cpu_capacity*100).round(2)
- #@log.debug "Percentage of CPU limit: #{percent}"
- state = HealthMonitorState.computeHealthMonitorState(@log, monitor_id, percent, @@health_monitor_config[HealthMonitorConstants::NODE_CPU_MONITOR_ID])
- #@log.debug "Computed State : #{state}"
- timestamp = record['DataItems'][0]['Timestamp']
- health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"cpuUsageMillicores" => metric_value/1000000.to_f, "cpuUtilizationPercentage" => percent}}
-
- monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(@log, monitor_id, [@@clusterId, @@hostName])
- # record = HealthMonitorSignalReducer.reduceSignal(@log, monitor_id, monitor_instance_id, @@health_monitor_config[monitor_id], node_name: @@hostName)
- # temp = record.nil? ? "Nil" : record["MonitorInstanceId"]
- health_record = {}
- time_now = Time.now.utc.iso8601
- health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id
- health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
- health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
- health_record[HealthMonitorRecordFields::AGENT_COLLECTION_TIME] = time_now
- health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
- health_record[HealthMonitorRecordFields::NODE_NAME] = @@hostName
- @log.info "Processed Node CPU"
- return health_record
- end
- return nil
- end
-
- def process_node_memory_record(record, metric_value)
- monitor_id = HealthMonitorConstants::NODE_MEMORY_MONITOR_ID
- #@log.debug "processing node memory record"
- if record.nil?
- return nil
- else
- instance_name = record['DataItems'][0]['InstanceName']
- #@log.info "Memory capacity #{@memory_capacity}"
-
- percent = (metric_value.to_f/@memory_capacity*100).round(2)
- #@log.debug "Percentage of Memory limit: #{percent}"
- state = HealthMonitorState.computeHealthMonitorState(@log, monitor_id, percent, @@health_monitor_config[HealthMonitorConstants::NODE_MEMORY_MONITOR_ID])
- #@log.debug "Computed State : #{state}"
- timestamp = record['DataItems'][0]['Timestamp']
- health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"memoryRssBytes" => metric_value.to_f, "memoryUtilizationPercentage" => percent}}
- #@log.info health_monitor_record
-
- monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(monitor_id, [@@clusterId, @@hostName])
- #@log.info "Monitor Instance Id: #{monitor_instance_id}"
- # temp = record.nil? ? "Nil" : record["MonitorInstanceId"]
- health_record = {}
- time_now = Time.now.utc.iso8601
- health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id
- health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
- health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
- health_record[HealthMonitorRecordFields::AGENT_COLLECTION_TIME] = time_now
- health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
- health_record[HealthMonitorRecordFields::NODE_NAME] = @@hostName
- @log.info "Processed Node Memory"
- return health_record
end
- return nil
end
end
end
diff --git a/source/code/plugin/filter_cadvisor_health_node.rb b/source/code/plugin/filter_cadvisor_health_node.rb
index faa574993..d2f735cd1 100644
--- a/source/code/plugin/filter_cadvisor_health_node.rb
+++ b/source/code/plugin/filter_cadvisor_health_node.rb
@@ -30,13 +30,10 @@ class CAdvisor2NodeHealthFilter < Filter
@@clusterName = KubernetesApiClient.getClusterName
@@clusterId = KubernetesApiClient.getClusterId
@@clusterRegion = KubernetesApiClient.getClusterRegion
- @@cluster_health_model_enabled = HealthMonitorUtils.is_cluster_health_model_enabled
def initialize
begin
super
- @cpu_capacity = 0.0
- @memory_capacity = 0.0
@last_resource_refresh = DateTime.now.to_time.to_i
@metrics_to_collect_hash = {}
@resources = HealthKubernetesResources.instance # this doesnt require node and pod inventory. So no need to populate them
@@ -59,6 +56,8 @@ def configure(conf)
def start
begin
super
+ @cpu_capacity = 1.0 #avoid divide by zero error in case of network issues accessing kube-api
+ @memory_capacity = 1.0
@metrics_to_collect_hash = HealthMonitorUtils.build_metrics_hash(@metrics_to_collect)
@log.debug "Calling ensure_cpu_memory_capacity_set cpu_capacity #{@cpu_capacity} memory_capacity #{@memory_capacity}"
node_capacity = HealthMonitorUtils.ensure_cpu_memory_capacity_set(@@hm_log, @cpu_capacity, @memory_capacity, @@hostName)
@@ -73,27 +72,26 @@ def start
end
def filter_stream(tag, es)
- if !@@cluster_health_model_enabled
- @log.info "Cluster Health Model disabled in filter_cadvisor_health_node"
- return MultiEventStream.new
- end
- new_es = MultiEventStream.new
- #HealthMonitorUtils.refresh_kubernetes_api_data(@log, @hostName)
- records_count = 0
- es.each { |time, record|
- begin
+ begin
+ node_capacity = HealthMonitorUtils.ensure_cpu_memory_capacity_set(@@hm_log, @cpu_capacity, @memory_capacity, @@hostName)
+ @cpu_capacity = node_capacity[0]
+ @memory_capacity = node_capacity[1]
+ new_es = MultiEventStream.new
+ records_count = 0
+ es.each { |time, record|
filtered_record = filter(tag, time, record)
if !filtered_record.nil?
new_es.add(time, filtered_record)
records_count += 1
end
- rescue => e
- @log.info "Error in filter_stream for filter_cadvisor_health_node #{e.message}"
+ }
+ @log.debug "Filter Records Count #{records_count}"
+ return new_es
+ rescue => e
+ @log.info "Error in filter_cadvisor_health_node filter_stream #{e.backtrace}"
ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"})
- end
- }
- @log.debug "Filter Records Count #{records_count}"
- new_es
+ return MultiEventStream.new
+ end
end
def filter(tag, time, record)
@@ -101,21 +99,12 @@ def filter(tag, time, record)
if record.key?("MonitorLabels")
return record
end
+
object_name = record['DataItems'][0]['ObjectName']
counter_name = record['DataItems'][0]['Collections'][0]['CounterName'].downcase
if @metrics_to_collect_hash.key?(counter_name.downcase)
metric_value = record['DataItems'][0]['Collections'][0]['Value']
case object_name
- when @@object_name_k8s_container
- case counter_name.downcase
- when @@counter_name_cpu
- # @log.debug "Object Name #{object_name}"
- # @log.debug "Counter Name #{counter_name}"
- # @log.debug "Metric Value #{metric_value}"
- #return process_container_cpu_record(record, metric_value)
- when @@counter_name_memory_rss
- #return process_container_memory_record(record, metric_value)
- end
when @@object_name_k8s_node
case counter_name.downcase
when @@counter_name_cpu
@@ -134,82 +123,8 @@ def filter(tag, time, record)
end
end
- def process_container_cpu_record(record, metric_value)
- monitor_id = HealthMonitorConstants::CONTAINER_CPU_MONITOR_ID
- @log.debug "processing container cpu record"
- if record.nil?
- return nil
- else
- instance_name = record['DataItems'][0]['InstanceName']
- key = HealthMonitorUtils.getContainerKeyFromInstanceName(instance_name)
- container_metadata = HealthMonitorUtils.getContainerMetadata(key)
- if !container_metadata.nil?
- cpu_limit = container_metadata['cpuLimit']
- end
-
- if cpu_limit.to_s.empty?
- #@log.info "CPU Limit is nil"
- cpu_limit = @cpu_capacity
- end
-
- #@log.info "cpu limit #{cpu_limit}"
-
- percent = (metric_value.to_f/cpu_limit*100).round(2)
- #@log.debug "Container #{key} | Percentage of CPU limit: #{percent}"
- state = HealthMonitorUtils.compute_percentage_state(percent, @provider.get_config(monitor_id))
- #@log.debug "Computed State : #{state}"
- timestamp = record['DataItems'][0]['Timestamp']
- health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"cpuUsageMillicores" => metric_value/1000000.to_f, "cpuUtilizationPercentage" => percent}}
- #@log.info health_monitor_record
-
- monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(monitor_id, [@@clusterId, @@hostName, key])
- #@log.info "Monitor Instance Id: #{monitor_instance_id}"
- temp = record.nil? ? "Nil" : record["MonitorInstanceId"]
- @log.info "Processed Container CPU #{temp}"
- return record
- end
- return nil
- end
-
- def process_container_memory_record(record, metric_value)
- monitor_id = HealthMonitorConstants::CONTAINER_MEMORY_MONITOR_ID
- #@log.debug "processing container memory record"
- if record.nil?
- return nil
- else
- instance_name = record['DataItems'][0]['InstanceName']
- key = HealthMonitorUtils.getContainerKeyFromInstanceName(instance_name)
- container_metadata = HealthMonitorUtils.getContainerMetadata(key)
- if !container_metadata.nil?
- memory_limit = container_metadata['memoryLimit']
- end
-
- if memory_limit.to_s.empty?
- #@log.info "Memory Limit is nil"
- memory_limit = @memory_capacity
- end
-
- #@log.info "memory limit #{memory_limit}"
-
- percent = (metric_value.to_f/memory_limit*100).round(2)
- #@log.debug "Container #{key} | Percentage of Memory limit: #{percent}"
- state = HealthMonitorUtils.compute_percentage_state(percent, @provider.get_config(HealthMonitorConstants::CONTAINER_MEMORY_MONITOR_ID))
- #@log.debug "Computed State : #{state}"
- timestamp = record['DataItems'][0]['Timestamp']
- health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"memoryRssBytes" => metric_value.to_f, "memoryUtilizationPercentage" => percent}}
- #@log.info health_monitor_record
-
- monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(monitor_id, [@@clusterId, @@hostName, key])
- #@log.info "Monitor Instance Id: #{monitor_instance_id}"
- temp = record.nil? ? "Nil" : record["MonitorInstanceId"]
- @log.info "Processed Container Memory #{temp}"
- return record
- end
- return nil
- end
-
def process_node_cpu_record(record, metric_value)
- monitor_id = HealthMonitorConstants::NODE_CPU_MONITOR_ID
+ monitor_id = MonitorId::NODE_CPU_MONITOR_ID
#@log.debug "processing node cpu record"
if record.nil?
return nil
@@ -219,7 +134,7 @@ def process_node_cpu_record(record, metric_value)
percent = (metric_value.to_f/@cpu_capacity*100).round(2)
#@log.debug "Percentage of CPU limit: #{percent}"
- state = HealthMonitorUtils.compute_percentage_state(percent, @provider.get_config(HealthMonitorConstants::NODE_CPU_MONITOR_ID))
+ state = HealthMonitorUtils.compute_percentage_state(percent, @provider.get_config(MonitorId::NODE_CPU_MONITOR_ID))
#@log.debug "Computed State : #{state}"
timestamp = record['DataItems'][0]['Timestamp']
health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"cpuUsageMillicores" => metric_value/1000000.to_f, "cpuUtilizationPercentage" => percent}}
@@ -231,7 +146,7 @@ def process_node_cpu_record(record, metric_value)
health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id
health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
- health_record[HealthMonitorRecordFields::AGENT_COLLECTION_TIME] = time_now
+ health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now
health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
health_record[HealthMonitorRecordFields::NODE_NAME] = @@hostName
@log.info "Processed Node CPU"
@@ -241,7 +156,7 @@ def process_node_cpu_record(record, metric_value)
end
def process_node_memory_record(record, metric_value)
- monitor_id = HealthMonitorConstants::NODE_MEMORY_MONITOR_ID
+ monitor_id = MonitorId::NODE_MEMORY_MONITOR_ID
#@log.debug "processing node memory record"
if record.nil?
return nil
@@ -251,7 +166,7 @@ def process_node_memory_record(record, metric_value)
percent = (metric_value.to_f/@memory_capacity*100).round(2)
#@log.debug "Percentage of Memory limit: #{percent}"
- state = HealthMonitorUtils.compute_percentage_state(percent, @provider.get_config(HealthMonitorConstants::NODE_MEMORY_MONITOR_ID))
+ state = HealthMonitorUtils.compute_percentage_state(percent, @provider.get_config(MonitorId::NODE_MEMORY_MONITOR_ID))
#@log.debug "Computed State : #{state}"
timestamp = record['DataItems'][0]['Timestamp']
health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"memoryRssBytes" => metric_value.to_f, "memoryUtilizationPercentage" => percent}}
@@ -263,7 +178,7 @@ def process_node_memory_record(record, metric_value)
health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id
health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
- health_record[HealthMonitorRecordFields::AGENT_COLLECTION_TIME] = time_now
+ health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now
health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
health_record[HealthMonitorRecordFields::NODE_NAME] = @@hostName
@log.info "Processed Node Memory"
diff --git a/source/code/plugin/filter_health_model_builder.rb b/source/code/plugin/filter_health_model_builder.rb
index 39452cb7e..fa92038e6 100644
--- a/source/code/plugin/filter_health_model_builder.rb
+++ b/source/code/plugin/filter_health_model_builder.rb
@@ -19,11 +19,10 @@ class FilterHealthModelBuilder < Filter
attr_reader :buffer, :model_builder, :health_model_definition, :monitor_factory, :state_finalizers, :monitor_set, :model_builder, :hierarchy_builder, :resources, :kube_api_down_handler, :provider, :reducer, :state, :generator
include HealthModel
- @@rewrite_tag = 'oms.api.KubeHealth.AgentCollectionTime'
+ @@rewrite_tag = 'kubehealth.Signals'
@@cluster_id = KubernetesApiClient.getClusterId
@@token_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
@@cert_file_path = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
- @@cluster_health_model_enabled = HealthMonitorUtils.is_cluster_health_model_enabled
def initialize
begin
@@ -49,6 +48,7 @@ def initialize
@state.initialize_state(deserialized_state_info)
@cluster_old_state = 'none'
@cluster_new_state = 'none'
+ @container_cpu_memory_records = []
rescue => e
ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"})
end
@@ -77,31 +77,45 @@ def shutdown
def filter_stream(tag, es)
begin
- if !@@cluster_health_model_enabled
- @log.info "Cluster Health Model disabled in filter_health_model_builder"
- return []
- end
new_es = MultiEventStream.new
time = Time.now
- if tag.start_with?("kubehealth.DaemonSet")
- records = []
+ if tag.start_with?("kubehealth.DaemonSet.Node")
+ node_records = []
+ if !es.nil?
+ es.each{|time, record|
+ node_records.push(record)
+ }
+ @buffer.add_to_buffer(node_records)
+ end
+ return MultiEventStream.new
+ elsif tag.start_with?("kubehealth.DaemonSet.Container")
+ container_records = []
if !es.nil?
es.each{|time, record|
- records.push(record)
+ container_records.push(record)
}
- @buffer.add_to_buffer(records)
end
- return []
+ container_records_aggregator = HealthContainerCpuMemoryAggregator.new(@resources, @provider)
+ deduped_records = container_records_aggregator.dedupe_records(container_records)
+ @container_cpu_memory_records.push(*deduped_records) # push the records for aggregation later
+ return MultiEventStream.new
elsif tag.start_with?("kubehealth.ReplicaSet")
- @log.info "TAG #{tag}"
records = []
es.each{|time, record|
records.push(record)
}
@buffer.add_to_buffer(records)
+
+ container_records_aggregator = HealthContainerCpuMemoryAggregator.new(@resources, @provider)
+ container_records_aggregator.aggregate(@container_cpu_memory_records)
+ container_records_aggregator.compute_state
+ aggregated_container_records = container_records_aggregator.get_records
+ @buffer.add_to_buffer(aggregated_container_records)
+
records_to_process = @buffer.get_buffer
@buffer.reset_buffer
+ @container_cpu_memory_records = []
health_monitor_records = []
records_to_process.each do |record|
@@ -117,7 +131,6 @@ def filter_stream(tag, es)
@provider.get_config(monitor_id),
record[HealthMonitorRecordFields::DETAILS]
)
-
health_monitor_records.push(health_monitor_record)
#puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}"
end
@@ -159,6 +172,8 @@ def filter_stream(tag, es)
@log.info "after Adding missing signals all_records.size #{all_records.size}"
+ HealthMonitorHelpers.add_agentpool_node_label_if_not_present(all_records)
+
# build the health model
@model_builder.process_records(all_records)
all_monitors = @model_builder.finalize_model
@@ -169,7 +184,8 @@ def filter_stream(tag, es)
all_monitors.each{|monitor_instance_id, monitor|
if monitor.is_aggregate_monitor
@state.update_state(monitor,
- @provider.get_config(monitor.monitor_id)
+ @provider.get_config(monitor.monitor_id),
+ true
)
end
@@ -185,23 +201,36 @@ def filter_stream(tag, es)
@log.info "after optimizing health signals all_monitors.size #{all_monitors.size}"
+ current_time = Time.now
+ emit_time = current_time.to_f
# for each key in monitor.keys,
# get the state from health_monitor_state
# generate the record to send
all_monitors.keys.each{|key|
record = @provider.get_record(all_monitors[key], state)
- if record[HealthMonitorRecordFields::MONITOR_ID] == MonitorId::CLUSTER && all_monitors.size > 1
- old_state = record[HealthMonitorRecordFields::OLD_STATE]
- new_state = record[HealthMonitorRecordFields::NEW_STATE]
- if old_state != new_state && @cluster_old_state != old_state && @cluster_new_state != new_state
- ApplicationInsightsUtility.sendCustomEvent("HealthModel_ClusterStateChanged",{"old_state" => old_state , "new_state" => new_state, "monitor_count" => all_monitors.size})
- @log.info "sent telemetry for cluster state change from #{record['OldState']} to #{record['NewState']}"
- @cluster_old_state = old_state
- @cluster_new_state = new_state
+ if record[HealthMonitorRecordFields::MONITOR_ID] == MonitorId::CLUSTER
+ if !record[HealthMonitorRecordFields::DETAILS].nil?
+ details = JSON.parse(record[HealthMonitorRecordFields::DETAILS])
+ details[HealthMonitorRecordFields::HEALTH_MODEL_DEFINITION_VERSION] = "#{ENV['HEALTH_MODEL_DEFINITION_VERSION']}"
+ record[HealthMonitorRecordFields::DETAILS] = details.to_json
+ end
+ if all_monitors.size > 1
+ old_state = record[HealthMonitorRecordFields::OLD_STATE]
+ new_state = record[HealthMonitorRecordFields::NEW_STATE]
+ if old_state != new_state && @cluster_old_state != old_state && @cluster_new_state != new_state
+ ApplicationInsightsUtility.sendCustomEvent("HealthModel_ClusterStateChanged",{"old_state" => old_state , "new_state" => new_state, "monitor_count" => all_monitors.size})
+ @log.info "sent telemetry for cluster state change from #{record['OldState']} to #{record['NewState']}"
+ @cluster_old_state = old_state
+ @cluster_new_state = new_state
+ end
end
end
- #@log.info "#{record["Details"]} #{record["MonitorInstanceId"]} #{record["OldState"]} #{record["NewState"]}"
- new_es.add(time, record)
+ record_wrapper = {
+ "DataType" => "KUBE_HEALTH_BLOB",
+ "IPName" => "ContainerInsights",
+ "DataItems" => [record.each { |k, v| record[k] = v }],
+ }
+ new_es.add(emit_time, record_wrapper)
}
#emit the stream
@@ -215,8 +244,8 @@ def filter_stream(tag, es)
@cluster_health_state.update_state(@state.to_h)
# return an empty event stream, else the match will throw a NoMethodError
- return []
- elsif tag.start_with?("oms.api.KubeHealth.AgentCollectionTime")
+ return MultiEventStream.new
+ elsif tag.start_with?("kubehealth.Signals")
# this filter also acts as a pass through as we are rewriting the tag and emitting to the fluent stream
es
else
diff --git a/source/code/plugin/health/agg_monitor_id_labels.rb b/source/code/plugin/health/agg_monitor_id_labels.rb
index 86a3381cd..bb016adb4 100644
--- a/source/code/plugin/health/agg_monitor_id_labels.rb
+++ b/source/code/plugin/health/agg_monitor_id_labels.rb
@@ -1,5 +1,3 @@
-require_relative 'health_model_constants'
-
module HealthModel
class AggregateMonitorInstanceIdLabels
@@id_labels_mapping = {
@@ -8,12 +6,9 @@ class AggregateMonitorInstanceIdLabels
MonitorId::NODE => [HealthMonitorLabels::AGENTPOOL, HealthMonitorLabels::ROLE, HealthMonitorLabels::HOSTNAME],
MonitorId::NAMESPACE => [HealthMonitorLabels::NAMESPACE],
MonitorId::AGENT_NODE_POOL => [HealthMonitorLabels::AGENTPOOL],
- # MonitorId::ALL_AGENT_NODE_POOLS => [],
- # MonitorId::ALL_NODE_POOLS => [],
- # MonitorId::ALL_NODES => [],
- # MonitorId::K8S_INFRASTRUCTURE => [],
- # MonitorId::CLUSTER => [],
- # MonitorId::WORKLOAD => []
+ MonitorId::CONTAINER => [HealthMonitorLabels::NAMESPACE, HealthMonitorLabels::WORKLOAD_NAME, HealthMonitorLabels::CONTAINER],
+ MonitorId::CONTAINER_CPU_MONITOR_ID => [HealthMonitorLabels::NAMESPACE, HealthMonitorLabels::WORKLOAD_NAME],
+ MonitorId::CONTAINER_MEMORY_MONITOR_ID => [HealthMonitorLabels::NAMESPACE, HealthMonitorLabels::WORKLOAD_NAME],
}
def self.get_labels_for(monitor_id)
diff --git a/source/code/plugin/health/health_container_cpu_memory_aggregator.rb b/source/code/plugin/health/health_container_cpu_memory_aggregator.rb
new file mode 100644
index 000000000..e98c288b3
--- /dev/null
+++ b/source/code/plugin/health/health_container_cpu_memory_aggregator.rb
@@ -0,0 +1,258 @@
+require_relative 'health_model_constants'
+=begin
+ @cpu_records/@memory_records
+ [
+ {
+ "namespace_workload_container_name" : {
+ "limit" : limit, #number
+ "limit_set" : limit_set, #bool
+ "record_count" : record_count, #number
+ "workload_name": workload_name,
+ "workload_kind": workload_kind,
+ "namespace" : namespace,
+ "container": container,
+ records:[
+ {
+ "counter_value": counter_value,
+ "pod_name": pod_name,
+ "container": container,
+ "state" : state
+ },
+ {
+ "counter_value": counter_value,
+ "pod_name": pod_name,
+ "container": container,
+ "state" : state
+ }
+ ]
+ }
+ }
+ ]
+=end
+module HealthModel
+ # this class aggregates the records at the container level
+ class HealthContainerCpuMemoryAggregator
+
+ attr_reader :pod_uid_lookup, :workload_container_count, :cpu_records, :memory_records, :provider
+
+ @@memory_counter_name = 'memoryRssBytes'
+ @@cpu_counter_name = 'cpuUsageNanoCores'
+ def initialize(resources, provider)
+ @pod_uid_lookup = resources.get_pod_uid_lookup
+ @workload_container_count = resources.get_workload_container_count
+ @cpu_records = {}
+ @memory_records = {}
+ @log = HealthMonitorHelpers.get_log_handle
+ @provider = provider
+ end
+
+ def dedupe_records(container_records)
+ cpu_deduped_instances = {}
+ memory_deduped_instances = {}
+ container_records = container_records.select{|record| record['CounterName'] == @@memory_counter_name || record['CounterName'] == @@cpu_counter_name}
+
+ container_records.each do |record|
+ begin
+ instance_name = record["InstanceName"]
+ counter_name = record["CounterName"]
+ case counter_name
+ when @@memory_counter_name
+ resource_instances = memory_deduped_instances
+ when @@cpu_counter_name
+ resource_instances = cpu_deduped_instances
+ else
+ @log.info "Unexpected Counter Name #{counter_name}"
+ next
+ end
+ if !resource_instances.key?(instance_name)
+ resource_instances[instance_name] = record
+ else
+ r = resource_instances[instance_name]
+ if record["Timestamp"] > r["Timestamp"]
+ @log.info "Dropping older record"
+ resource_instances[instance_name] = record
+ end
+ end
+ rescue => e
+ @log.info "Exception when deduping record #{record}"
+ end
+ end
+ return cpu_deduped_instances.values.concat(memory_deduped_instances.values)
+ end
+
+ def aggregate(container_records)
+ #filter and select only cpuUsageNanoCores and memoryRssBytes
+ container_records = container_records.select{|record| record['CounterName'] == @@memory_counter_name || record['CounterName'] == @@cpu_counter_name}
+ # poduid lookup has poduid/cname --> workload_name, namespace, cpu_limit, memory limit mapping
+ # from the container records, extract the poduid/cname, get the values from poduid_lookup, and aggregate based on namespace_workload_cname
+ container_records.each do |record|
+ begin
+ instance_name = record["InstanceName"]
+ lookup_key = instance_name.split('/').last(2).join('/')
+ if !@pod_uid_lookup.key?(lookup_key)
+ next
+ end
+ namespace = @pod_uid_lookup[lookup_key]['namespace']
+ workload_name = @pod_uid_lookup[lookup_key]['workload_name']
+ cname = lookup_key.split('/')[1]
+ counter_name = record["CounterName"]
+ case counter_name
+ when @@memory_counter_name
+ resource_hash = @memory_records
+ resource_type = 'memory'
+ when @@cpu_counter_name
+ resource_hash = @cpu_records
+ resource_type = 'cpu'
+ else
+ @log.info "Unexpected Counter Name #{counter_name}"
+ next
+ end
+
+ # this is used as a look up from the pod_uid_lookup in kubernetes_health_resources object
+ resource_hash_key = "#{namespace}_#{workload_name.split('~~')[1]}_#{cname}"
+
+ # if the resource map doesnt contain the key, add limit, count and records
+ if !resource_hash.key?(resource_hash_key)
+ resource_hash[resource_hash_key] = {}
+ resource_hash[resource_hash_key]["limit"] = @pod_uid_lookup[lookup_key]["#{resource_type}_limit"]
+ resource_hash[resource_hash_key]["limit_set"] = @pod_uid_lookup[lookup_key]["#{resource_type}_limit_set"]
+ resource_hash[resource_hash_key]["record_count"] = @workload_container_count[resource_hash_key]
+ resource_hash[resource_hash_key]["workload_name"] = @pod_uid_lookup[lookup_key]["workload_name"]
+ resource_hash[resource_hash_key]["workload_kind"] = @pod_uid_lookup[lookup_key]["workload_kind"]
+ resource_hash[resource_hash_key]["namespace"] = @pod_uid_lookup[lookup_key]["namespace"]
+ resource_hash[resource_hash_key]["container"] = @pod_uid_lookup[lookup_key]["container"]
+ resource_hash[resource_hash_key]["records"] = []
+ end
+
+ container_instance_record = {}
+
+ pod_name = @pod_uid_lookup[lookup_key]["pod_name"]
+ #append the record to the hash
+ # append only if the record is not a duplicate record
+ container_instance_record["pod_name"] = pod_name
+ container_instance_record["counter_value"] = record["CounterValue"]
+ container_instance_record["container"] = @pod_uid_lookup[lookup_key]["container"]
+ container_instance_record["state"] = calculate_container_instance_state(
+ container_instance_record["counter_value"],
+ resource_hash[resource_hash_key]["limit"],
+ @provider.get_config(MonitorId::CONTAINER_MEMORY_MONITOR_ID))
+ resource_hash[resource_hash_key]["records"].push(container_instance_record)
+ rescue => e
+ @log.info "Error in HealthContainerCpuMemoryAggregator aggregate #{e.backtrace} #{e.message} #{record}"
+ end
+ end
+ end
+
+ def compute_state()
+ # if missing records, set state to unknown
+ # if limits not set, set state to warning
+ # if all records present, sort in descending order of metric, compute index based on StateThresholdPercentage, get the state (pass/fail/warn) based on monitor state (Using [Fail/Warn]ThresholdPercentage, and set the state)
+ @memory_records.each{|k,v|
+ calculate_monitor_state(v, @provider.get_config(MonitorId::CONTAINER_MEMORY_MONITOR_ID))
+ }
+
+ @cpu_records.each{|k,v|
+ calculate_monitor_state(v, @provider.get_config(MonitorId::CONTAINER_CPU_MONITOR_ID))
+ }
+
+ @log.info "Finished computing state"
+ end
+
+ def get_records
+ time_now = Time.now.utc.iso8601
+ container_cpu_memory_records = []
+
+ @cpu_records.each{|resource_key, record|
+ health_monitor_record = {
+ "timestamp" => time_now,
+ "state" => record["state"],
+ "details" => {
+ "cpu_limit_millicores" => record["limit"]/1000000.to_f,
+ "cpu_usage_instances" => record["records"].map{|r| r.each {|k,v|
+ k == "counter_value" ? r[k] = r[k] / 1000000.to_f : r[k]
+ }},
+ "workload_name" => record["workload_name"],
+ "workload_kind" => record["workload_kind"],
+ "namespace" => record["namespace"],
+ "container" => record["container"],
+ "limit_set" => record["limit_set"]
+ }
+ }
+
+ monitor_instance_id = HealthMonitorHelpers.get_monitor_instance_id(MonitorId::CONTAINER_CPU_MONITOR_ID, resource_key.split('_')) #container_cpu_utilization-namespace-workload-container
+
+ health_record = {}
+ health_record[HealthMonitorRecordFields::MONITOR_ID] = MonitorId::CONTAINER_CPU_MONITOR_ID
+ health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
+ health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
+ health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now
+ health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
+ container_cpu_memory_records.push(health_record)
+ }
+
+ @memory_records.each{|resource_key, record|
+ health_monitor_record = {
+ "timestamp" => time_now,
+ "state" => record["state"],
+ "details" => {
+ "memory_limit_bytes" => record["limit"],
+ "memory_usage_instances" => record["records"],
+ "workload_name" => record["workload_name"],
+ "workload_kind" => record["workload_kind"],
+ "namespace" => record["namespace"],
+ "container" => record["container"]
+ }
+ }
+
+ monitor_instance_id = HealthMonitorHelpers.get_monitor_instance_id(MonitorId::CONTAINER_MEMORY_MONITOR_ID, resource_key.split('_')) #container_cpu_utilization-namespace-workload-container
+
+ health_record = {}
+ health_record[HealthMonitorRecordFields::MONITOR_ID] = MonitorId::CONTAINER_MEMORY_MONITOR_ID
+ health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
+ health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
+ health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now
+ health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
+ container_cpu_memory_records.push(health_record)
+ }
+ return container_cpu_memory_records
+ end
+
+ private
+ def calculate_monitor_state(v, config)
+ if !v['limit_set'] && v['namespace'] != 'kube-system'
+ v["state"] = HealthMonitorStates::WARNING
+ else
+ # sort records by descending order of metric
+ v["records"] = v["records"].sort_by{|record| record["counter_value"]}.reverse
+ size = v["records"].size
+ if size < v["record_count"]
+ unknown_count = v["record_count"] - size
+ for i in unknown_count.downto(1)
+ # it requires a lot of computation to figure out which actual pod is not sending the signal
+ v["records"].insert(0, {"counter_value" => -1, "container" => v["container"], "pod_name" => "???", "state" => HealthMonitorStates::UNKNOWN }) #insert -1 for unknown records
+ end
+ end
+
+ if size == 1
+ state_index = 0
+ else
+ state_threshold = config['StateThresholdPercentage'].to_f
+ count = ((state_threshold*size)/100).ceil
+ state_index = size - count
+ end
+ v["state"] = v["records"][state_index]["state"]
+ end
+ end
+
+ def calculate_container_instance_state(counter_value, limit, config)
+ percent_value = counter_value * 100 / limit
+ if percent_value > config['FailThresholdPercentage']
+ return HealthMonitorStates::FAIL
+ elsif percent_value > config['WarnThresholdPercentage']
+ return HealthMonitorStates::WARN
+ else
+ return HealthMonitorStates::PASS
+ end
+ end
+ end
+end
\ No newline at end of file
diff --git a/source/code/plugin/health/health_container_cpu_memory_record_formatter.rb b/source/code/plugin/health/health_container_cpu_memory_record_formatter.rb
new file mode 100644
index 000000000..5c7db82d9
--- /dev/null
+++ b/source/code/plugin/health/health_container_cpu_memory_record_formatter.rb
@@ -0,0 +1,34 @@
+module HealthModel
+ class HealthContainerCpuMemoryRecordFormatter
+
+ @@health_container_cpu_memory_record_template = '{
+ "InstanceName": "%{instance_name}",
+ "CounterName" : "%{counter_name}",
+ "CounterValue" : %{metric_value},
+ "Timestamp" : "%{timestamp}"
+ }'
+ def initialize
+ @log = HealthMonitorHelpers.get_log_handle
+ end
+
+ def get_record_from_cadvisor_record(cadvisor_record)
+ begin
+ instance_name = cadvisor_record['DataItems'][0]['InstanceName']
+ counter_name = cadvisor_record['DataItems'][0]['Collections'][0]['CounterName']
+ metric_value = cadvisor_record['DataItems'][0]['Collections'][0]['Value']
+ timestamp = cadvisor_record['DataItems'][0]['Timestamp']
+
+ health_container_cpu_memory_record = @@health_container_cpu_memory_record_template % {
+ instance_name: instance_name,
+ counter_name: counter_name,
+ metric_value: metric_value,
+ timestamp: timestamp
+ }
+ return JSON.parse(health_container_cpu_memory_record)
+ rescue => e
+ @log.info "Error in get_record_from_cadvisor_record #{e.message} #{e.backtrace}"
+ return nil
+ end
+ end
+ end
+end
\ No newline at end of file
diff --git a/source/code/plugin/health/health_kube_api_down_handler.rb b/source/code/plugin/health/health_kube_api_down_handler.rb
index 7f72360f8..a87c43ef1 100644
--- a/source/code/plugin/health/health_kube_api_down_handler.rb
+++ b/source/code/plugin/health/health_kube_api_down_handler.rb
@@ -2,11 +2,11 @@
module HealthModel
class HealthKubeApiDownHandler
def initialize
- @@monitors_to_change = [HealthMonitorConstants::WORKLOAD_CPU_OVERSUBSCRIBED_MONITOR_ID,
- HealthMonitorConstants::WORKLOAD_MEMORY_OVERSUBSCRIBED_MONITOR_ID,
- HealthMonitorConstants::NODE_CONDITION_MONITOR_ID,
- HealthMonitorConstants::USER_WORKLOAD_PODS_READY_MONITOR_ID,
- HealthMonitorConstants::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID]
+ @@monitors_to_change = [MonitorId::WORKLOAD_CPU_OVERSUBSCRIBED_MONITOR_ID,
+ MonitorId::WORKLOAD_MEMORY_OVERSUBSCRIBED_MONITOR_ID,
+ MonitorId::NODE_CONDITION_MONITOR_ID,
+ MonitorId::USER_WORKLOAD_PODS_READY_MONITOR_ID,
+ MonitorId::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID]
end
# update kube-api dependent monitors to be 'unknown' if kube-api is down or monitor is unavailable
@@ -14,7 +14,7 @@ def handle_kube_api_down(health_monitor_records)
health_monitor_records_map = {}
health_monitor_records.map{|record| health_monitor_records_map[record.monitor_instance_id] = record}
- if !health_monitor_records_map.key?(HealthMonitorConstants::KUBE_API_STATUS) || (health_monitor_records_map.key?(HealthMonitorConstants::KUBE_API_STATUS) && health_monitor_records_map[HealthMonitorConstants::KUBE_API_STATUS].state != 'pass')
+ if !health_monitor_records_map.key?(MonitorId::KUBE_API_STATUS) || (health_monitor_records_map.key?(MonitorId::KUBE_API_STATUS) && health_monitor_records_map[MonitorId::KUBE_API_STATUS].state != 'pass')
#iterate over the map and set the state to unknown for related monitors
health_monitor_records.each{|health_monitor_record|
if @@monitors_to_change.include?(health_monitor_record.monitor_id)
diff --git a/source/code/plugin/health/health_kubernetes_resources.rb b/source/code/plugin/health/health_kubernetes_resources.rb
index 2f591722b..30a9ac7ca 100644
--- a/source/code/plugin/health/health_kubernetes_resources.rb
+++ b/source/code/plugin/health/health_kubernetes_resources.rb
@@ -5,8 +5,8 @@ module HealthModel
class HealthKubernetesResources
include Singleton
- attr_accessor :node_inventory, :pod_inventory, :deployment_inventory
- attr_reader :nodes, :pods, :workloads
+ attr_accessor :node_inventory, :pod_inventory, :deployment_inventory, :pod_uid_lookup, :workload_container_count
+ attr_reader :nodes, :pods, :workloads, :deployment_lookup
def initialize
@node_inventory = []
@@ -16,6 +16,9 @@ def initialize
@pods = []
@workloads = []
@log = HealthMonitorHelpers.get_log_handle
+ @pod_uid_lookup = {}
+ @deployment_lookup = {}
+ @workload_container_count = {}
end
def get_node_inventory
@@ -33,71 +36,255 @@ def get_nodes
return @nodes
end
- def get_pod_inventory
- return @pod_inventory
- end
-
- def get_pods
- return @pods
+ def set_deployment_inventory(deployments)
+ @deployment_inventory = deployments
+ @deployment_lookup = {}
end
def get_workload_names
- @pods = []
workload_names = {}
- deployment_lookup = {}
- @deployment_inventory['items'].each do |deployment|
- match_labels = deployment['spec']['selector']['matchLabels'].to_h
- namespace = deployment['metadata']['namespace']
- match_labels.each{|k,v|
- deployment_lookup["#{namespace}-#{k}=#{v}"] = "#{deployment['metadata']['namespace']}~~#{deployment['metadata']['name']}"
- }
+ @pod_inventory['items'].each do |pod|
+ workload_name = get_workload_name(pod)
+ workload_names[workload_name] = true if workload_name
end
+ return workload_names.keys
+ end
+
+ def build_pod_uid_lookup
+ @workload_container_count = {}
@pod_inventory['items'].each do |pod|
begin
- has_owner = !pod['metadata']['ownerReferences'].nil?
- owner_kind = ''
- if has_owner
- owner_kind = pod['metadata']['ownerReferences'][0]['kind']
- controller_name = pod['metadata']['ownerReferences'][0]['name']
- else
- owner_kind = pod['kind']
- controller_name = pod['metadata']['name']
+ namespace = pod['metadata']['namespace']
+ poduid = pod['metadata']['uid']
+ pod_name = pod['metadata']['name']
+ workload_name = get_workload_name(pod)
+ workload_kind = get_workload_kind(pod)
+ # we don't show jobs in container health
+ if workload_kind.casecmp('job') == 0
+ next
+ end
+ pod['spec']['containers'].each do |container|
+ cname = container['name']
+ key = "#{poduid}/#{cname}"
+ cpu_limit_set = true
+ memory_limit_set = true
+ begin
+ cpu_limit = get_numeric_value('cpu', container['resources']['limits']['cpu'])
+ rescue => exception
+ #@log.info "Exception getting container cpu limit #{container['resources']}"
+ cpu_limit = get_node_capacity(pod['spec']['nodeName'], 'cpu')
+ cpu_limit_set = false
+ end
+ begin
+ memory_limit = get_numeric_value('memory', container['resources']['limits']['memory'])
+ rescue => exception
+ #@log.info "Exception getting container memory limit #{container['resources']}"
+ memory_limit = get_node_capacity(pod['spec']['nodeName'], 'memory')
+ memory_limit_set = false
+ end
+ @pod_uid_lookup[key] = {"workload_kind" => workload_kind, "workload_name" => workload_name, "namespace" => namespace, "cpu_limit" => cpu_limit, "memory_limit" => memory_limit, "cpu_limit_set" => cpu_limit_set, "memory_limit_set" => memory_limit_set, "container" => cname, "pod_name" => pod_name}
+ container_count_key = "#{namespace}_#{workload_name.split('~~')[1]}_#{cname}"
+ if !@workload_container_count.key?(container_count_key)
+ @workload_container_count[container_count_key] = 1
+ else
+ count = @workload_container_count[container_count_key]
+ @workload_container_count[container_count_key] = count + 1
+ end
end
+ rescue => e
+ @log.info "Error in build_pod_uid_lookup #{pod} #{e.message}"
+ end
+ end
+ end
- namespace = pod['metadata']['namespace']
+ def get_pod_uid_lookup
+ return @pod_uid_lookup
+ end
- workload_name = ''
- if owner_kind.nil?
- owner_kind = 'Pod'
- end
- case owner_kind.downcase
- when 'job'
- # we are excluding jobs
- next
- when 'replicaset'
- # get the labels, and see if there is a match. If there is, it is the deployment. If not, use replica set name/controller name
- labels = pod['metadata']['labels'].to_h
- labels.each {|k,v|
- lookup_key = "#{namespace}-#{k}=#{v}"
- if deployment_lookup.key?(lookup_key)
- workload_name = deployment_lookup[lookup_key]
- break
- end
- }
- if workload_name.empty?
- workload_name = "#{namespace}~~#{controller_name}"
+ def get_workload_container_count
+ return @workload_container_count
+ end
+
+ private
+ def get_workload_name(pod)
+
+ if @deployment_lookup.empty?
+ @deployment_inventory['items'].each do |deployment|
+ match_labels = deployment['spec']['selector']['matchLabels'].to_h
+ namespace = deployment['metadata']['namespace']
+ match_labels.each{|k,v|
+ @deployment_lookup["#{namespace}-#{k}=#{v}"] = "#{deployment['metadata']['namespace']}~~#{deployment['metadata']['name']}"
+ }
+ end
+ end
+
+ begin
+ has_owner = !pod['metadata']['ownerReferences'].nil?
+ owner_kind = ''
+ if has_owner
+ owner_kind = pod['metadata']['ownerReferences'][0]['kind']
+ controller_name = pod['metadata']['ownerReferences'][0]['name']
+ else
+ owner_kind = pod['kind']
+ controller_name = pod['metadata']['name']
+ end
+ namespace = pod['metadata']['namespace']
+
+ workload_name = ''
+ if owner_kind.nil?
+ owner_kind = 'Pod'
+ end
+ case owner_kind.downcase
+ when 'job'
+ # we are excluding jobs
+ return nil
+ when 'replicaset'
+ # get the labels, and see if there is a match. If there is, it is the deployment. If not, use replica set name/controller name
+ labels = pod['metadata']['labels'].to_h
+ labels.each {|k,v|
+ lookup_key = "#{namespace}-#{k}=#{v}"
+ if @deployment_lookup.key?(lookup_key)
+ workload_name = @deployment_lookup[lookup_key]
+ break
end
- when 'daemonset'
+ }
+ if workload_name.empty?
workload_name = "#{namespace}~~#{controller_name}"
- else
- workload_name = "#{namespace}~~#{pod['metadata']['name']}"
end
- rescue => e
- @log.info "Error when processing pod #{pod['metadata']['name']} #{e.message}"
+ when 'daemonset'
+ workload_name = "#{namespace}~~#{controller_name}"
+ else
+ workload_name = "#{namespace}~~#{pod['metadata']['name']}"
end
- workload_names[workload_name] = true
+ return workload_name
+ rescue => e
+ @log.info "Error in get_workload_name(pod) #{e.message}"
+ return nil
+ end
+ end
+
+ def get_workload_kind(pod)
+ if @deployment_lookup.empty?
+ @deployment_inventory['items'].each do |deployment|
+ match_labels = deployment['spec']['selector']['matchLabels'].to_h
+ namespace = deployment['metadata']['namespace']
+ match_labels.each{|k,v|
+ @deployment_lookup["#{namespace}-#{k}=#{v}"] = "#{deployment['metadata']['namespace']}~~#{deployment['metadata']['name']}"
+ }
+ end
+ end
+
+ begin
+ has_owner = !pod['metadata']['ownerReferences'].nil?
+ owner_kind = ''
+ if has_owner
+ owner_kind = pod['metadata']['ownerReferences'][0]['kind']
+ else
+ owner_kind = pod['kind']
+ end
+
+ if owner_kind.nil?
+ owner_kind = 'Pod'
+ end
+ return owner_kind
+ rescue => e
+ @log.info "Error in get_workload_kind(pod) #{e.message}"
+ return nil
end
- return workload_names.keys
end
+
+ def get_node_capacity(node_name, type)
+ if node_name.nil? #unscheduled pods will not have a node name
+ return -1
+ end
+ begin
+ @node_inventory["items"].each do |node|
+ if (!node["status"]["capacity"].nil?) && node["metadata"]["name"].casecmp(node_name.downcase) == 0
+ return get_numeric_value(type, node["status"]["capacity"][type])
+ end
+ end
+ rescue => e
+ @log.info "Error in get_node_capacity(pod, #{type}) #{e.backtrace} #{e.message}"
+ return -1
+ end
+ end
+
+ #Cannot reuse the code from KubernetesApiClient, for unit testing reasons. KubernetesApiClient has a dependency on oms_common.rb etc.
+ def get_numeric_value(metricName, metricVal)
+ metricValue = metricVal.downcase
+ begin
+ case metricName
+ when "memory" #convert to bytes for memory
+ #https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/
+ if (metricValue.end_with?("ki"))
+ metricValue.chomp!("ki")
+ metricValue = Float(metricValue) * 1024.0 ** 1
+ elsif (metricValue.end_with?("mi"))
+ metricValue.chomp!("mi")
+ metricValue = Float(metricValue) * 1024.0 ** 2
+ elsif (metricValue.end_with?("gi"))
+ metricValue.chomp!("gi")
+ metricValue = Float(metricValue) * 1024.0 ** 3
+ elsif (metricValue.end_with?("ti"))
+ metricValue.chomp!("ti")
+ metricValue = Float(metricValue) * 1024.0 ** 4
+ elsif (metricValue.end_with?("pi"))
+ metricValue.chomp!("pi")
+ metricValue = Float(metricValue) * 1024.0 ** 5
+ elsif (metricValue.end_with?("ei"))
+ metricValue.chomp!("ei")
+ metricValue = Float(metricValue) * 1024.0 ** 6
+ elsif (metricValue.end_with?("zi"))
+ metricValue.chomp!("zi")
+ metricValue = Float(metricValue) * 1024.0 ** 7
+ elsif (metricValue.end_with?("yi"))
+ metricValue.chomp!("yi")
+ metricValue = Float(metricValue) * 1024.0 ** 8
+ elsif (metricValue.end_with?("k"))
+ metricValue.chomp!("k")
+ metricValue = Float(metricValue) * 1000.0 ** 1
+ elsif (metricValue.end_with?("m"))
+ metricValue.chomp!("m")
+ metricValue = Float(metricValue) * 1000.0 ** 2
+ elsif (metricValue.end_with?("g"))
+ metricValue.chomp!("g")
+ metricValue = Float(metricValue) * 1000.0 ** 3
+ elsif (metricValue.end_with?("t"))
+ metricValue.chomp!("t")
+ metricValue = Float(metricValue) * 1000.0 ** 4
+ elsif (metricValue.end_with?("p"))
+ metricValue.chomp!("p")
+ metricValue = Float(metricValue) * 1000.0 ** 5
+ elsif (metricValue.end_with?("e"))
+ metricValue.chomp!("e")
+ metricValue = Float(metricValue) * 1000.0 ** 6
+ elsif (metricValue.end_with?("z"))
+ metricValue.chomp!("z")
+ metricValue = Float(metricValue) * 1000.0 ** 7
+ elsif (metricValue.end_with?("y"))
+ metricValue.chomp!("y")
+ metricValue = Float(metricValue) * 1000.0 ** 8
+ else #assuming there are no units specified, it is bytes (the below conversion will fail for other unsupported 'units')
+ metricValue = Float(metricValue)
+ end
+ when "cpu" #convert to nanocores for cpu
+ #https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/
+ if (metricValue.end_with?("m"))
+ metricValue.chomp!("m")
+ metricValue = Float(metricValue) * 1000.0 ** 2
+ else #assuming no units specified, it is cores that we are converting to nanocores (the below conversion will fail for other unsupported 'units')
+ metricValue = Float(metricValue) * 1000.0 ** 3
+ end
+ else
+ @log.warn("getMetricNumericValue: Unsupported metric #{metricName}. Returning 0 for metric value")
+ metricValue = 0
+ end #case statement
+ rescue => error
+ @log.warn("getMetricNumericValue failed: #{error} for metric #{metricName} with value #{metricVal}. Returning 0 for metric value")
+ return 0
+ end
+ return metricValue
+ end
+
end
end
\ No newline at end of file
diff --git a/source/code/plugin/health/health_missing_signal_generator.rb b/source/code/plugin/health/health_missing_signal_generator.rb
index 419680afa..1827a0190 100644
--- a/source/code/plugin/health/health_missing_signal_generator.rb
+++ b/source/code/plugin/health/health_missing_signal_generator.rb
@@ -24,14 +24,14 @@ def get_missing_signals(cluster_id, health_monitor_records, health_k8s_inventory
node_signals_hash = {}
nodes.each{|node|
- node_signals_hash[node] = [HealthMonitorConstants::NODE_CPU_MONITOR_ID, HealthMonitorConstants::NODE_MEMORY_MONITOR_ID, HealthMonitorConstants::NODE_CONDITION_MONITOR_ID]
+ node_signals_hash[node] = [MonitorId::NODE_MEMORY_MONITOR_ID, MonitorId::NODE_CPU_MONITOR_ID, MonitorId::NODE_CONDITION_MONITOR_ID]
}
log = HealthMonitorHelpers.get_log_handle
log.info "last_received_records #{@last_received_records.size} nodes #{nodes}"
@last_received_records.each{|monitor_instance_id, monitor|
if !health_monitor_records_map.key?(monitor_instance_id)
if HealthMonitorHelpers.is_node_monitor(monitor.monitor_id)
- node_name = monitor.labels['kubernetes.io/hostname']
+ node_name = monitor.labels[HealthMonitorLabels::HOSTNAME]
new_monitor = HealthMonitorRecord.new(
monitor.monitor_id,
monitor.monitor_instance_id,
@@ -83,7 +83,7 @@ def get_missing_signals(cluster_id, health_monitor_records, health_k8s_inventory
health_monitor_records.each{|health_monitor_record|
# remove signals from the list of expected signals if we see them in the list of current signals
if HealthMonitorHelpers.is_node_monitor(health_monitor_record.monitor_id)
- node_name = health_monitor_record.labels['kubernetes.io/hostname']
+ node_name = health_monitor_record.labels[HealthMonitorLabels::HOSTNAME]
if node_signals_hash.key?(node_name)
signals = node_signals_hash[node_name]
signals.delete(health_monitor_record.monitor_id)
@@ -111,7 +111,7 @@ def get_missing_signals(cluster_id, health_monitor_records, health_k8s_inventory
{"timestamp" => Time.now.utc.iso8601, "state" => HealthMonitorStates::UNKNOWN, "details" => "no signal received from node #{node}"}
)
missing_signals_map[monitor_instance_id] = new_monitor
- log.info "Added missing signal when node_signals_hash was not empty #{new_monitor.monitor_instance_id} #{new_monitor.state}"
+ log.info "Added missing signal when node_signals_hash was not empty #{new_monitor.monitor_instance_id} #{new_monitor.state} #{new_monitor.labels.keys}"
}
}
end
diff --git a/source/code/plugin/health/health_model_constants.rb b/source/code/plugin/health/health_model_constants.rb
index 82ae569f3..0922c7ff2 100644
--- a/source/code/plugin/health/health_model_constants.rb
+++ b/source/code/plugin/health/health_model_constants.rb
@@ -2,80 +2,80 @@ module HealthModel
class MonitorState
CRITICAL = "fail"
ERROR = "err"
- WARNING = "warn"
- NONE = "none"
HEALTHY = "pass"
+ NONE = "none"
UNKNOWN = "unknown"
+ WARNING = "warn"
end
class AggregationAlgorithm
- WORSTOF = "worstOf"
PERCENTAGE = "percentage"
+ WORSTOF = "worstOf"
end
class MonitorId
- CLUSTER = 'cluster';
- ALL_NODES = 'all_nodes';
- K8S_INFRASTRUCTURE = 'k8s_infrastructure'
-
- NODE = 'node';
AGENT_NODE_POOL = 'agent_node_pool'
- MASTER_NODE_POOL = 'master_node_pool'
ALL_AGENT_NODE_POOLS = 'all_agent_node_pools'
- ALL_NODE_POOLS = 'all_node_pools';
-
- WORKLOAD = 'all_workloads';
- CAPACITY = 'capacity';
-
- USER_WORKLOAD = 'user_workload';
- SYSTEM_WORKLOAD = 'system_workload'
+ ALL_NODE_POOLS = 'all_node_pools'
+ ALL_NODES = 'all_nodes'
+ CAPACITY = 'capacity'
+ CLUSTER = 'cluster'
+ CONTAINER = 'container'
+ CONTAINER_CPU_MONITOR_ID = "container_cpu_utilization"
+ CONTAINER_MEMORY_MONITOR_ID = "container_memory_utilization"
+ K8S_INFRASTRUCTURE = 'k8s_infrastructure'
+ KUBE_API_STATUS = "kube_api_status"
+ MASTER_NODE_POOL = 'master_node_pool'
NAMESPACE = 'namespace';
+ NODE = 'node';
+ NODE_CONDITION_MONITOR_ID = "node_condition"
+ NODE_CPU_MONITOR_ID = "node_cpu_utilization"
+ NODE_MEMORY_MONITOR_ID = "node_memory_utilization"
+ SYSTEM_WORKLOAD = 'system_workload'
+ SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID = "system_workload_pods_ready"
+ USER_WORKLOAD = 'user_workload';
+ USER_WORKLOAD_PODS_READY_MONITOR_ID = "user_workload_pods_ready"
+ WORKLOAD = 'all_workloads';
+ WORKLOAD_CONTAINER_CPU_PERCENTAGE_MONITOR_ID = "container_cpu_utilization"
+ WORKLOAD_CONTAINER_MEMORY_PERCENTAGE_MONITOR_ID = "container_memory_utilization"
+ WORKLOAD_CPU_OVERSUBSCRIBED_MONITOR_ID = "subscribed_capacity_cpu"
+ WORKLOAD_MEMORY_OVERSUBSCRIBED_MONITOR_ID = "subscribed_capacity_memory"
end
class HealthMonitorRecordFields
CLUSTER_ID = "ClusterId"
- MONITOR_ID = "MonitorId"
- MONITOR_INSTANCE_ID = "MonitorInstanceId"
- MONITOR_LABELS = "MonitorLabels"
DETAILS = "Details"
+ HEALTH_MODEL_DEFINITION_VERSION = "HealthModelDefinitionVersion"
MONITOR_CONFIG = "MonitorConfig"
- OLD_STATE = "OldState"
+ MONITOR_ID = "MonitorTypeId"
+ MONITOR_INSTANCE_ID = "MonitorInstanceId"
+ MONITOR_LABELS = "MonitorLabels"
NEW_STATE = "NewState"
- AGENT_COLLECTION_TIME = "AgentCollectionTime"
- TIME_FIRST_OBSERVED = "TimeFirstObserved"
NODE_NAME = "NodeName"
- NAMESPACE = "Namespace"
- end
-
- class HealthMonitorConstants
- NODE_CPU_MONITOR_ID = "node_cpu_utilization"
- NODE_MEMORY_MONITOR_ID = "node_memory_utilization"
- CONTAINER_CPU_MONITOR_ID = "container_cpu_utilization"
- CONTAINER_MEMORY_MONITOR_ID = "container_memory_utilization"
- NODE_CONDITION_MONITOR_ID = "node_condition"
- WORKLOAD_CPU_OVERSUBSCRIBED_MONITOR_ID = "subscribed_capacity_cpu"
- WORKLOAD_MEMORY_OVERSUBSCRIBED_MONITOR_ID = "subscribed_capacity_memory"
- WORKLOAD_CONTAINER_CPU_PERCENTAGE_MONITOR_ID = "container_cpu_utilization"
- WORKLOAD_CONTAINER_MEMORY_PERCENTAGE_MONITOR_ID = "container_memory_utilization"
- KUBE_API_STATUS = "kube_api_status"
- USER_WORKLOAD_PODS_READY_MONITOR_ID = "user_workload_pods_ready"
- SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID = "system_workload_pods_ready"
+ OLD_STATE = "OldState"
+ PARENT_MONITOR_INSTANCE_ID = "ParentMonitorInstanceId"
+ TIME_FIRST_OBSERVED = "TimeFirstObserved"
+ TIME_GENERATED = "TimeGenerated"
end
class HealthMonitorStates
- PASS = "pass"
FAIL = "fail"
- WARNING = "warn"
NONE = "none"
+ PASS = "pass"
UNKNOWN = "unknown"
+ WARNING = "warn"
end
class HealthMonitorLabels
- WORKLOAD_NAME = "container.azm.ms/workload-name"
- WORKLOAD_KIND = "container.azm.ms/workload-kind"
- NAMESPACE = "container.azm.ms/namespace"
AGENTPOOL = "agentpool"
- ROLE = "kubernetes.io/role"
+ CONTAINER = "container.azm.ms/container"
HOSTNAME = "kubernetes.io/hostname"
+ NAMESPACE = "container.azm.ms/namespace"
+ ROLE = "kubernetes.io/role"
+ WORKLOAD_KIND = "container.azm.ms/workload-kind"
+ WORKLOAD_NAME = "container.azm.ms/workload-name"
+ MASTERROLE = "node-role.kubernetes.io/master"
+ COMPUTEROLE = "node-role.kubernetes.io/compute"
+ INFRAROLE = "node-role.kubernetes.io/infra"
end
end
\ No newline at end of file
diff --git a/source/code/plugin/health/health_monitor_helpers.rb b/source/code/plugin/health/health_monitor_helpers.rb
index 9f0315978..4efd4c608 100644
--- a/source/code/plugin/health/health_monitor_helpers.rb
+++ b/source/code/plugin/health/health_monitor_helpers.rb
@@ -16,11 +16,11 @@ class HealthMonitorHelpers
class << self
def is_node_monitor(monitor_id)
- return (monitor_id == HealthMonitorConstants::NODE_CPU_MONITOR_ID || monitor_id == HealthMonitorConstants::NODE_MEMORY_MONITOR_ID || monitor_id == HealthMonitorConstants::NODE_CONDITION_MONITOR_ID)
+ return (monitor_id == MonitorId::NODE_CPU_MONITOR_ID || monitor_id == MonitorId::NODE_MEMORY_MONITOR_ID || monitor_id == MonitorId::NODE_CONDITION_MONITOR_ID)
end
def is_pods_ready_monitor(monitor_id)
- return (monitor_id == HealthMonitorConstants::USER_WORKLOAD_PODS_READY_MONITOR_ID || monitor_id == HealthMonitorConstants::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID)
+ return (monitor_id == MonitorId::USER_WORKLOAD_PODS_READY_MONITOR_ID || monitor_id == MonitorId::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID)
end
def get_log_handle
@@ -31,6 +31,44 @@ def get_monitor_instance_id(monitor_id, args = [])
string_to_hash = args.join("/")
return "#{monitor_id}-#{Digest::MD5.hexdigest(string_to_hash)}"
end
+
+ def add_agentpool_node_label_if_not_present(records)
+ records.each{|record|
+ # continue if it is not a node monitor
+ if !is_node_monitor(record.monitor_id)
+ #@log.info "#{record.monitor_id} is not a NODE MONITOR"
+ next
+ end
+ labels_keys = record.labels.keys
+
+ if labels_keys.include?(HealthMonitorLabels::AGENTPOOL)
+ @log.info "#{record.monitor_id} includes agentpool label. Value = #{record.labels[HealthMonitorLabels::AGENTPOOL]}"
+ @log.info "Labels present = #{labels_keys}"
+ next
+ else
+ #@log.info "#{record} does not include agentpool label."
+ @log.info "Labels present = #{labels_keys}"
+ role_name = 'unknown'
+ if record.labels.include?(HealthMonitorLabels::ROLE)
+ role_name = record.labels[HealthMonitorLabels::ROLE]
+ elsif record.labels.include?(HealthMonitorLabels::MASTERROLE)
+ if !record.labels[HealthMonitorLabels::MASTERROLE].empty?
+ role_name = 'master'
+ end
+ elsif record.labels.include?(HealthMonitorLabels::COMPUTEROLE)
+ if !record.labels[HealthMonitorLabels::COMPUTEROLE].empty?
+ role_name = 'compute'
+ end
+ elsif record.labels.include?(HealthMonitorLabels::INFRAROLE)
+ if !record.labels[HealthMonitorLabels::INFRAROLE].empty?
+ role_name = 'infra'
+ end
+ end
+ @log.info "Adding agentpool label #{role_name}_node_pool for #{record.monitor_id}"
+ record.labels[HealthMonitorLabels::AGENTPOOL] = "#{role_name}_node_pool"
+ end
+ }
+ end
end
end
diff --git a/source/code/plugin/health/health_monitor_provider.rb b/source/code/plugin/health/health_monitor_provider.rb
index 60ad69d76..e75824268 100644
--- a/source/code/plugin/health/health_monitor_provider.rb
+++ b/source/code/plugin/health/health_monitor_provider.rb
@@ -66,8 +66,9 @@ def get_record(health_monitor_record, health_monitor_state)
monitor_record[HealthMonitorRecordFields::OLD_STATE] = old_state
monitor_record[HealthMonitorRecordFields::DETAILS] = details.to_json
monitor_record[HealthMonitorRecordFields::MONITOR_CONFIG] = config.to_json
- monitor_record[HealthMonitorRecordFields::AGENT_COLLECTION_TIME] = Time.now.utc.iso8601
+ monitor_record[HealthMonitorRecordFields::TIME_GENERATED] = Time.now.utc.iso8601
monitor_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_first_observed
+ monitor_record[HealthMonitorRecordFields::PARENT_MONITOR_INSTANCE_ID] = ''
return monitor_record
end
@@ -87,17 +88,28 @@ def get_labels(health_monitor_record)
}
monitor_id = health_monitor_record[HealthMonitorRecordFields::MONITOR_ID]
case monitor_id
- when HealthMonitorConstants::CONTAINER_CPU_MONITOR_ID, HealthMonitorConstants::CONTAINER_MEMORY_MONITOR_ID, HealthMonitorConstants::USER_WORKLOAD_PODS_READY_MONITOR_ID, HealthMonitorConstants::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID
+ when MonitorId::CONTAINER_CPU_MONITOR_ID, MonitorId::CONTAINER_MEMORY_MONITOR_ID, MonitorId::USER_WORKLOAD_PODS_READY_MONITOR_ID, MonitorId::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID
namespace = health_monitor_record[HealthMonitorRecordFields::DETAILS]['details']['namespace']
- workload_name = health_monitor_record[HealthMonitorRecordFields::DETAILS]['details']['workloadName']
- workload_kind = health_monitor_record[HealthMonitorRecordFields::DETAILS]['details']['workloadKind']
+ workload_name = health_monitor_record[HealthMonitorRecordFields::DETAILS]['details']['workload_name']
+ workload_kind = health_monitor_record[HealthMonitorRecordFields::DETAILS]['details']['workload_kind']
monitor_labels[HealthMonitorLabels::WORKLOAD_NAME] = workload_name.split('~~')[1]
monitor_labels[HealthMonitorLabels::WORKLOAD_KIND] = workload_kind
monitor_labels[HealthMonitorLabels::NAMESPACE] = namespace
- when HealthMonitorConstants::NODE_CPU_MONITOR_ID, HealthMonitorConstants::NODE_MEMORY_MONITOR_ID, HealthMonitorConstants::NODE_CONDITION_MONITOR_ID
+ # add the container name for container memory/cpu
+ if monitor_id == MonitorId::CONTAINER_CPU_MONITOR_ID || monitor_id == MonitorId::CONTAINER_MEMORY_MONITOR_ID
+ container = health_monitor_record[HealthMonitorRecordFields::DETAILS]['details']['container']
+ monitor_labels[HealthMonitorLabels::CONTAINER] = container
+ end
+
+ #TODO: This doesn't belong here. Move this elsewhere
+ health_monitor_record[HealthMonitorRecordFields::DETAILS]['details'].delete('namespace')
+ health_monitor_record[HealthMonitorRecordFields::DETAILS]['details'].delete('workload_name')
+ health_monitor_record[HealthMonitorRecordFields::DETAILS]['details'].delete('workload_kind')
+
+ when MonitorId::NODE_CPU_MONITOR_ID, MonitorId::NODE_MEMORY_MONITOR_ID, MonitorId::NODE_CONDITION_MONITOR_ID
node_name = health_monitor_record[HealthMonitorRecordFields::NODE_NAME]
@health_kubernetes_resources.get_node_inventory['items'].each do |node|
if !node_name.nil? && !node['metadata']['name'].nil? && node_name == node['metadata']['name']
diff --git a/source/code/plugin/health/health_monitor_state.rb b/source/code/plugin/health/health_monitor_state.rb
index 498c75ec7..7eb674f1e 100644
--- a/source/code/plugin/health/health_monitor_state.rb
+++ b/source/code/plugin/health/health_monitor_state.rb
@@ -57,10 +57,11 @@ def initialize_state(deserialized_state)
2. if there is a "consistent" state change for monitors
3. if the signal is stale (> 4hrs)
4. If the latest state is none
+5. If an aggregate monitor has a change in its details, but no change in state
=end
def update_state(monitor, #UnitMonitor/AggregateMonitor
- monitor_config #Hash
- )
+ monitor_config, #Hash
+ is_aggregate_monitor = false)
samples_to_keep = 1
monitor_instance_id = monitor.monitor_instance_id
log = HealthMonitorHelpers.get_log_handle
@@ -76,12 +77,13 @@ def update_state(monitor, #UnitMonitor/AggregateMonitor
samples_to_keep = monitor_config['ConsecutiveSamplesForStateTransition'].to_i
end
+ deleted_record = {}
if @@monitor_states.key?(monitor_instance_id)
health_monitor_instance_state = @@monitor_states[monitor_instance_id]
health_monitor_records = health_monitor_instance_state.prev_records #This should be an array
if health_monitor_records.size == samples_to_keep
- health_monitor_records.delete_at(0)
+ deleted_record = health_monitor_records.delete_at(0)
end
health_monitor_records.push(monitor.details)
health_monitor_instance_state.prev_records = health_monitor_records
@@ -106,7 +108,6 @@ def update_state(monitor, #UnitMonitor/AggregateMonitor
@@monitor_states[monitor_instance_id] = health_monitor_instance_state
end
-
# update old and new state based on the history and latest record.
# TODO: this is a little hairy. Simplify
@@ -142,6 +143,10 @@ def update_state(monitor, #UnitMonitor/AggregateMonitor
@@first_record_sent[monitor_instance_id] = true
health_monitor_instance_state.should_send = true
set_state(monitor_instance_id, health_monitor_instance_state)
+ elsif agg_monitor_details_changed?(is_aggregate_monitor, deleted_record, health_monitor_instance_state.prev_records[0])
+ health_monitor_instance_state.should_send = true
+ set_state(monitor_instance_id, health_monitor_instance_state)
+ log.debug "#{monitor_instance_id} condition: agg monitor details changed should_send #{health_monitor_instance_state.should_send}"
end
# latest state is different that last sent state
else
@@ -212,5 +217,17 @@ def is_state_change_consistent(health_monitor_records, samples_to_check)
end
return true
end
+
+ def agg_monitor_details_changed?(is_aggregate_monitor, last_sent_details, latest_details)
+ log = HealthMonitorHelpers.get_log_handle
+ if !is_aggregate_monitor
+ return false
+ end
+ if latest_details['details'] != last_sent_details['details']
+ log.info "Last Sent Details #{JSON.pretty_generate(last_sent_details)} \n Latest Details: #{JSON.pretty_generate(latest_details)}"
+ return true
+ end
+ return false
+ end
end
end
\ No newline at end of file
diff --git a/source/code/plugin/health/health_monitor_utils.rb b/source/code/plugin/health/health_monitor_utils.rb
index e707651dc..27e9b9a6e 100644
--- a/source/code/plugin/health/health_monitor_utils.rb
+++ b/source/code/plugin/health/health_monitor_utils.rb
@@ -36,21 +36,31 @@ def compute_percentage_state(value, config)
end
fail_percentage = config['FailThresholdPercentage'].to_f
- if value > fail_percentage
- return HealthMonitorStates::FAIL
- elsif !warn_percentage.nil? && value > warn_percentage
- return HealthMonitorStates::WARNING
+ if !config.nil? && !config['Operator'].nil? && config['Operator'] == '<'
+ if value < fail_percentage
+ return HealthMonitorStates::FAIL
+ elsif !warn_percentage.nil? && value < warn_percentage
+ return HealthMonitorStates::WARNING
+ else
+ return HealthMonitorStates::PASS
+ end
else
- return HealthMonitorStates::PASS
+ if value > fail_percentage
+ return HealthMonitorStates::FAIL
+ elsif !warn_percentage.nil? && value > warn_percentage
+ return HealthMonitorStates::WARNING
+ else
+ return HealthMonitorStates::PASS
+ end
end
end
def is_node_monitor(monitor_id)
- return (monitor_id == HealthMonitorConstants::NODE_CPU_MONITOR_ID || monitor_id == HealthMonitorConstants::NODE_MEMORY_MONITOR_ID || monitor_id == HealthMonitorConstants::NODE_CONDITION_MONITOR_ID)
+ return (monitor_id == MonitorId::NODE_CPU_MONITOR_ID || monitor_id == MonitorId::NODE_MEMORY_MONITOR_ID || monitor_id == MonitorId::NODE_CONDITION_MONITOR_ID)
end
def is_pods_ready_monitor(monitor_id)
- return (monitor_id == HealthMonitorConstants::USER_WORKLOAD_PODS_READY_MONITOR_ID || monitor_id == HealthMonitorConstants::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID)
+ return (monitor_id == MonitorId::USER_WORKLOAD_PODS_READY_MONITOR_ID || monitor_id == MonitorId::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID)
end
def is_cluster_health_model_enabled
@@ -136,13 +146,23 @@ def get_pods_ready_hash(pod_inventory, deployment_inventory)
return pods_ready_percentage_hash
end
- def get_node_state_from_node_conditions(node_conditions)
+ def get_node_state_from_node_conditions(monitor_config, node_conditions)
pass = false
+ failtypes = ['outofdisk', 'networkunavailable'].to_set #default fail types
+ if !monitor_config.nil? && !monitor_config["NodeConditionTypesForFailedState"].nil?
+ failtypes = monitor_config["NodeConditionTypesForFailedState"]
+ if !failtypes.nil?
+ failtypes = failtypes.split(',').map{|x| x.downcase}.map{|x| x.gsub(" ","")}.to_set
+ end
+ end
+ log = get_log_handle
+ #log.info "Fail Types #{failtypes.inspect}"
node_conditions.each do |condition|
type = condition['type']
status = condition['status']
- if ((type == "NetworkUnavailable" || type == "OutOfDisk") && (status == 'True' || status == 'Unknown'))
+ #for each condition in the configuration, check if the type is not false. If yes, update state to fail
+ if (failtypes.include?(type.downcase) && (status == 'True' || status == 'Unknown'))
return "fail"
elsif ((type == "DiskPressure" || type == "MemoryPressure" || type == "PIDPressure") && (status == 'True' || status == 'Unknown'))
return "warn"
@@ -280,11 +300,12 @@ def get_monitor_instance_id(monitor_id, args = [])
def ensure_cpu_memory_capacity_set(log, cpu_capacity, memory_capacity, hostname)
log.info "ensure_cpu_memory_capacity_set cpu_capacity #{cpu_capacity} memory_capacity #{memory_capacity}"
- if cpu_capacity != 0.0 && memory_capacity != 0.0
+ if cpu_capacity != 1.0 && memory_capacity != 1.0
log.info "CPU And Memory Capacity are already set"
return [cpu_capacity, memory_capacity]
end
+ log.info "CPU and Memory Capacity Not set"
begin
@@nodeInventory = JSON.parse(KubernetesApiClient.getKubeResourceInfo("nodes").body)
rescue Exception => e
diff --git a/source/code/plugin/health/health_signal_reducer.rb b/source/code/plugin/health/health_signal_reducer.rb
index 1d520da8d..f92f24ac3 100644
--- a/source/code/plugin/health/health_signal_reducer.rb
+++ b/source/code/plugin/health/health_signal_reducer.rb
@@ -20,7 +20,6 @@ def reduce_signals(health_monitor_records, health_k8s_inventory)
if reduced_signals_map.key?(monitor_instance_id)
record = reduced_signals_map[monitor_instance_id]
if health_monitor_record.transition_date_time > record.transition_date_time # always take the latest record for a monitor instance id
- puts 'Duplicate Daemon Set signal'
reduced_signals_map[monitor_instance_id] = health_monitor_record
end
elsif HealthMonitorHelpers.is_node_monitor(monitor_id)
diff --git a/source/code/plugin/health/parent_monitor_provider.rb b/source/code/plugin/health/parent_monitor_provider.rb
index 4577abb99..4ab6e6297 100644
--- a/source/code/plugin/health/parent_monitor_provider.rb
+++ b/source/code/plugin/health/parent_monitor_provider.rb
@@ -8,6 +8,7 @@ def initialize(definition)
@health_model_definition = definition
@parent_monitor_mapping = {} #monitorId --> parent_monitor_id mapping
@parent_monitor_instance_mapping = {} #child monitor id -- > parent monitor instance mapping. Used in instances when the node no longer exists and impossible to compute from kube api results
+ @log = HealthMonitorHelpers.get_log_handle
end
# gets the parent monitor id given the state transition. It requires the monitor id and labels to determine the parent id
@@ -35,14 +36,13 @@ def get_parent_monitor_id(monitor)
op = "#{condition['operator']}"
right = "#{condition['value']}"
cond = left.send(op.to_sym, right)
-
if cond
@parent_monitor_mapping[monitor.monitor_instance_id] = condition['parent_id']
return condition['parent_id']
end
}
end
- raise "Conditions were not met to determine the parent monitor id" if monitor_id != MonitorId::CLUSTER
+ return @health_model_definition[monitor_id]['default_parent_monitor_id']
end
else
raise "Invalid Monitor Id #{monitor_id} in get_parent_monitor_id"
@@ -81,6 +81,7 @@ def get_parent_monitor_instance_id(monitor_instance_id, parent_monitor_id, paren
end
parent_monitor_instance_id = "#{parent_monitor_id}-#{values.join('-')}"
@parent_monitor_instance_mapping[monitor_instance_id] = parent_monitor_instance_id
+ @log.info "parent_monitor_instance_id for #{monitor_instance_id} => #{parent_monitor_instance_id}"
return parent_monitor_instance_id
end
end
diff --git a/source/code/plugin/health/unit_monitor.rb b/source/code/plugin/health/unit_monitor.rb
index 64262aa2e..9af599321 100644
--- a/source/code/plugin/health/unit_monitor.rb
+++ b/source/code/plugin/health/unit_monitor.rb
@@ -1,3 +1,4 @@
+require_relative 'health_model_constants'
require 'json'
module HealthModel
diff --git a/source/code/plugin/in_cadvisor_perf.rb b/source/code/plugin/in_cadvisor_perf.rb
index ce205322d..810fb512f 100644
--- a/source/code/plugin/in_cadvisor_perf.rb
+++ b/source/code/plugin/in_cadvisor_perf.rb
@@ -20,7 +20,7 @@ def initialize
config_param :tag, :string, :default => "oms.api.cadvisorperf"
config_param :mdmtag, :string, :default => "mdm.cadvisorperf"
config_param :nodehealthtag, :string, :default => "kubehealth.DaemonSet.Node"
- #config_param :containerhealthtag, :string, :default => "kubehealth.DaemonSet.Container"
+ config_param :containerhealthtag, :string, :default => "kubehealth.DaemonSet.Container"
def configure(conf)
super
@@ -54,12 +54,11 @@ def enumerate()
record["DataType"] = "LINUX_PERF_BLOB"
record["IPName"] = "LogManagement"
eventStream.add(time, record) if record
- #router.emit(@tag, time, record) if record
- end
+ end
router.emit_stream(@tag, eventStream) if eventStream
router.emit_stream(@mdmtag, eventStream) if eventStream
- #router.emit_stream(@containerhealthtag, eventStream) if eventStream
+ router.emit_stream(@containerhealthtag, eventStream) if eventStream
router.emit_stream(@nodehealthtag, eventStream) if eventStream
@@istestvar = ENV["ISTEST"]
diff --git a/source/code/plugin/in_kube_events.rb b/source/code/plugin/in_kube_events.rb
index f177b62bf..e1fdc5df6 100644
--- a/source/code/plugin/in_kube_events.rb
+++ b/source/code/plugin/in_kube_events.rb
@@ -47,17 +47,20 @@ def enumerate(eventList = nil)
currentTime = Time.now
emitTime = currentTime.to_f
batchTime = currentTime.utc.iso8601
- if eventList.nil?
- $log.info("in_kube_events::enumerate : Getting events from Kube API @ #{Time.now.utc.iso8601}")
- events = JSON.parse(KubernetesApiClient.getKubeResourceInfo("events").body)
- $log.info("in_kube_events::enumerate : Done getting events from Kube API @ #{Time.now.utc.iso8601}")
- else
- events = eventList
+
+ events = eventList
+ $log.info("in_kube_events::enumerate : Getting events from Kube API @ #{Time.now.utc.iso8601}")
+ eventInfo = KubernetesApiClient.getKubeResourceInfo("events")
+ $log.info("in_kube_events::enumerate : Done getting events from Kube API @ #{Time.now.utc.iso8601}")
+
+ if !eventInfo.nil?
+ events = JSON.parse(eventInfo.body)
end
+
eventQueryState = getEventQueryState
newEventQueryState = []
begin
- if (!events.empty? && !events["items"].nil?)
+ if (!events.nil? && !events.empty? && !events["items"].nil?)
eventStream = MultiEventStream.new
events["items"].each do |items|
record = {}
@@ -84,7 +87,7 @@ def enumerate(eventList = nil)
else
record["Computer"] = (OMS::Common.get_hostname)
end
- record["ClusterName"] = KubernetesApiClient.getClusterName
+ record['ClusterName'] = KubernetesApiClient.getClusterName
record["ClusterId"] = KubernetesApiClient.getClusterId
wrapper = {
"DataType" => "KUBE_EVENTS_BLOB",
diff --git a/source/code/plugin/in_kube_health.rb b/source/code/plugin/in_kube_health.rb
index 5d29eb035..9a1b8f9a9 100644
--- a/source/code/plugin/in_kube_health.rb
+++ b/source/code/plugin/in_kube_health.rb
@@ -26,7 +26,6 @@ def initialize
@@cluster_id = KubernetesApiClient.getClusterId
@resources = HealthKubernetesResources.instance
@provider = HealthMonitorProvider.new(@@cluster_id, HealthMonitorUtils.get_cluster_labels, @resources, @health_monitor_config_path)
- @@cluster_health_model_enabled = HealthMonitorUtils.is_cluster_health_model_enabled
rescue => e
ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"})
end
@@ -55,9 +54,7 @@ def start
@@clusterCpuCapacity = cluster_capacity[0]
@@clusterMemoryCapacity = cluster_capacity[1]
@@hmlog.info "Cluster CPU Capacity: #{@@clusterCpuCapacity} Memory Capacity: #{@@clusterMemoryCapacity}"
- if @@cluster_health_model_enabled
- ApplicationInsightsUtility.sendCustomEvent("in_kube_health Plugin Start", {})
- end
+ initialize_inventory
end
rescue => e
ApplicationInsightsUtility.sendExceptionTelemetry(e, {"FeatureArea" => "Health"})
@@ -76,10 +73,6 @@ def shutdown
def enumerate
begin
- if !@@cluster_health_model_enabled
- @@hmlog.info "Cluster Health Model disabled in in_kube_health"
- return
- end
currentTime = Time.now
emitTime = currentTime.to_f
@@ -97,7 +90,8 @@ def enumerate
@resources.node_inventory = node_inventory
@resources.pod_inventory = pod_inventory
- @resources.deployment_inventory = deployment_inventory
+ @resources.set_deployment_inventory(deployment_inventory)
+ @resources.build_pod_uid_lookup
if node_inventory_response.code.to_i != 200
record = process_kube_api_up_monitor("fail", node_inventory_response)
@@ -117,12 +111,12 @@ def enumerate
system_pods = pods_ready_hash.select{|k,v| v['namespace'] == 'kube-system'}
workload_pods = pods_ready_hash.select{|k,v| v['namespace'] != 'kube-system'}
- system_pods_ready_percentage_records = process_pods_ready_percentage(system_pods, HealthMonitorConstants::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID)
+ system_pods_ready_percentage_records = process_pods_ready_percentage(system_pods, MonitorId::SYSTEM_WORKLOAD_PODS_READY_MONITOR_ID)
system_pods_ready_percentage_records.each do |record|
health_monitor_records.push(record) if record
end
- workload_pods_ready_percentage_records = process_pods_ready_percentage(workload_pods, HealthMonitorConstants::USER_WORKLOAD_PODS_READY_MONITOR_ID)
+ workload_pods_ready_percentage_records = process_pods_ready_percentage(workload_pods, MonitorId::USER_WORKLOAD_PODS_READY_MONITOR_ID)
workload_pods_ready_percentage_records.each do |record|
health_monitor_records.push(record) if record
end
@@ -158,7 +152,7 @@ def process_cpu_oversubscribed_monitor(pod_inventory, node_inventory)
state = subscription > @@clusterCpuCapacity ? "fail" : "pass"
#CPU
- monitor_id = HealthMonitorConstants::WORKLOAD_CPU_OVERSUBSCRIBED_MONITOR_ID
+ monitor_id = MonitorId::WORKLOAD_CPU_OVERSUBSCRIBED_MONITOR_ID
health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"clusterCpuCapacity" => @@clusterCpuCapacity/1000000.to_f, "clusterCpuRequests" => subscription/1000000.to_f}}
# @@hmlog.info health_monitor_record
@@ -169,7 +163,7 @@ def process_cpu_oversubscribed_monitor(pod_inventory, node_inventory)
health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id
health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
- health_record[HealthMonitorRecordFields::AGENT_COLLECTION_TIME] = time_now
+ health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now
health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
health_record[HealthMonitorRecordFields::CLUSTER_ID] = @@cluster_id
#@@hmlog.info "Successfully processed process_cpu_oversubscribed_monitor"
@@ -185,7 +179,7 @@ def process_memory_oversubscribed_monitor(pod_inventory, node_inventory)
#@@hmlog.debug "Memory Oversubscribed Monitor State : #{state}"
#CPU
- monitor_id = HealthMonitorConstants::WORKLOAD_MEMORY_OVERSUBSCRIBED_MONITOR_ID
+ monitor_id = MonitorId::WORKLOAD_MEMORY_OVERSUBSCRIBED_MONITOR_ID
health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"clusterMemoryCapacity" => @@clusterMemoryCapacity.to_f, "clusterMemoryRequests" => subscription.to_f}}
hmlog = HealthMonitorUtils.get_log_handle
@@ -195,7 +189,7 @@ def process_memory_oversubscribed_monitor(pod_inventory, node_inventory)
health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id
health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
- health_record[HealthMonitorRecordFields::AGENT_COLLECTION_TIME] = time_now
+ health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now
health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
health_record[HealthMonitorRecordFields::CLUSTER_ID] = @@cluster_id
#@@hmlog.info "Successfully processed process_memory_oversubscribed_monitor"
@@ -205,21 +199,21 @@ def process_memory_oversubscribed_monitor(pod_inventory, node_inventory)
def process_kube_api_up_monitor(state, response)
timestamp = Time.now.utc.iso8601
- monitor_id = HealthMonitorConstants::KUBE_API_STATUS
+ monitor_id = MonitorId::KUBE_API_STATUS
details = response.each_header.to_h
details['ResponseCode'] = response.code
health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => details}
hmlog = HealthMonitorUtils.get_log_handle
#hmlog.info health_monitor_record
- monitor_instance_id = HealthMonitorConstants::KUBE_API_STATUS
+ monitor_instance_id = MonitorId::KUBE_API_STATUS
#hmlog.info "Monitor Instance Id: #{monitor_instance_id}"
health_record = {}
time_now = Time.now.utc.iso8601
health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id
health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
- health_record[HealthMonitorRecordFields::AGENT_COLLECTION_TIME] = time_now
+ health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now
health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
health_record[HealthMonitorRecordFields::CLUSTER_ID] = @@cluster_id
#@@hmlog.info "Successfully processed process_kube_api_up_monitor"
@@ -240,15 +234,15 @@ def process_pods_ready_percentage(pods_hash, config_monitor_id)
percent = pods_ready / total_pods * 100
timestamp = Time.now.utc.iso8601
- state = HealthMonitorUtils.compute_percentage_state((100-percent), monitor_config)
- health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"totalPods" => total_pods, "podsReady" => pods_ready, "workloadName" => workload_name, "namespace" => namespace, "workloadKind" => workload_kind}}
+ state = HealthMonitorUtils.compute_percentage_state(percent, monitor_config)
+ health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => {"totalPods" => total_pods, "podsReady" => pods_ready, "workload_name" => workload_name, "namespace" => namespace, "workload_kind" => workload_kind}}
monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(config_monitor_id, [@@cluster_id, namespace, workload_name])
health_record = {}
time_now = Time.now.utc.iso8601
health_record[HealthMonitorRecordFields::MONITOR_ID] = config_monitor_id
health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
- health_record[HealthMonitorRecordFields::AGENT_COLLECTION_TIME] = time_now
+ health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now
health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
health_record[HealthMonitorRecordFields::CLUSTER_ID] = @@cluster_id
records.push(health_record)
@@ -258,7 +252,7 @@ def process_pods_ready_percentage(pods_hash, config_monitor_id)
end
def process_node_condition_monitor(node_inventory)
- monitor_id = HealthMonitorConstants::NODE_CONDITION_MONITOR_ID
+ monitor_id = MonitorId::NODE_CONDITION_MONITOR_ID
timestamp = Time.now.utc.iso8601
monitor_config = @provider.get_config(monitor_id)
node_condition_monitor_records = []
@@ -266,11 +260,12 @@ def process_node_condition_monitor(node_inventory)
node_inventory['items'].each do |node|
node_name = node['metadata']['name']
conditions = node['status']['conditions']
- state = HealthMonitorUtils.get_node_state_from_node_conditions(conditions)
- #hmlog.debug "Node Name = #{node_name} State = #{state}"
+ state = HealthMonitorUtils.get_node_state_from_node_conditions(monitor_config, conditions)
details = {}
conditions.each do |condition|
- details[condition['type']] = {"Reason" => condition['reason'], "Message" => condition['message']}
+ state = !(condition['status'].downcase == 'true' && condition['type'].downcase != 'ready') ? HealthMonitorStates::PASS : HealthMonitorStates::FAIL
+ details[condition['type']] = {"Reason" => condition['reason'], "Message" => condition['message'], "State" => state}
+ #@@hmlog.info "Node Condition details: #{JSON.pretty_generate(details)}"
end
health_monitor_record = {"timestamp" => timestamp, "state" => state, "details" => details}
monitor_instance_id = HealthMonitorUtils.get_monitor_instance_id(monitor_id, [@@cluster_id, node_name])
@@ -279,7 +274,7 @@ def process_node_condition_monitor(node_inventory)
health_record[HealthMonitorRecordFields::MONITOR_ID] = monitor_id
health_record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID] = monitor_instance_id
health_record[HealthMonitorRecordFields::DETAILS] = health_monitor_record
- health_record[HealthMonitorRecordFields::AGENT_COLLECTION_TIME] = time_now
+ health_record[HealthMonitorRecordFields::TIME_GENERATED] = time_now
health_record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED] = time_now
health_record[HealthMonitorRecordFields::CLUSTER_ID] = @@cluster_id
health_record[HealthMonitorRecordFields::NODE_NAME] = node_name
@@ -290,6 +285,20 @@ def process_node_condition_monitor(node_inventory)
return node_condition_monitor_records
end
+ def initialize_inventory
+ #this is required because there are other components, like the container cpu memory aggregator, that depends on the mapping being initialized
+ node_inventory_response = KubernetesApiClient.getKubeResourceInfo("nodes")
+ node_inventory = JSON.parse(node_inventory_response.body)
+ pod_inventory_response = KubernetesApiClient.getKubeResourceInfo("pods")
+ pod_inventory = JSON.parse(pod_inventory_response.body)
+ deployment_inventory = JSON.parse(KubernetesApiClient.getKubeResourceInfo("deployments", api_version: "extensions/v1beta1").body)
+
+ @resources.node_inventory = node_inventory
+ @resources.pod_inventory = pod_inventory
+ @resources.set_deployment_inventory(deployment_inventory)
+ @resources.build_pod_uid_lookup
+ end
+
def run_periodic
@mutex.lock
done = @finished
diff --git a/source/code/plugin/in_kube_nodes.rb b/source/code/plugin/in_kube_nodes.rb
index 24ab51d4c..0a0fd9d2e 100644
--- a/source/code/plugin/in_kube_nodes.rb
+++ b/source/code/plugin/in_kube_nodes.rb
@@ -8,6 +8,7 @@ class Kube_nodeInventory_Input < Input
@@ContainerNodeInventoryTag = "oms.containerinsights.ContainerNodeInventory"
@@MDMKubeNodeInventoryTag = "mdm.kubenodeinventory"
@@promConfigMountPath = "/etc/config/settings/prometheus-data-collection-settings"
+ @@AzStackCloudFileName = "/etc/kubernetes/host/azurestackcloud.json"
@@rsPromInterval = ENV["TELEMETRY_RS_PROM_INTERVAL"]
@@rsPromFieldPassCount = ENV["TELEMETRY_RS_PROM_FIELDPASS_LENGTH"]
@@ -15,6 +16,7 @@ class Kube_nodeInventory_Input < Input
@@rsPromK8sServiceCount = ENV["TELEMETRY_RS_PROM_K8S_SERVICES_LENGTH"]
@@rsPromUrlCount = ENV["TELEMETRY_RS_PROM_URLS_LENGTH"]
@@rsPromMonitorPods = ENV["TELEMETRY_RS_PROM_MONITOR_PODS"]
+ @@rsPromMonitorPodsNamespaceLength = ENV["TELEMETRY_RS_PROM_MONITOR_PODS_NS_LENGTH"]
def initialize
super
@@ -59,11 +61,19 @@ def enumerate
emitTime = currentTime.to_f
batchTime = currentTime.utc.iso8601
telemetrySent = false
+
+ nodeInventory = nil
+
$log.info("in_kube_nodes::enumerate : Getting nodes from Kube API @ #{Time.now.utc.iso8601}")
- nodeInventory = JSON.parse(KubernetesApiClient.getKubeResourceInfo("nodes").body)
+ nodeInfo = KubernetesApiClient.getKubeResourceInfo("nodes")
$log.info("in_kube_nodes::enumerate : Done getting nodes from Kube API @ #{Time.now.utc.iso8601}")
+
+ if !nodeInfo.nil?
+ nodeInventory = JSON.parse(nodeInfo.body)
+ end
+
begin
- if (!nodeInventory.empty?)
+ if (!nodeInventory.nil? && !nodeInventory.empty?)
eventStream = MultiEventStream.new
containerNodeInventoryEventStream = MultiEventStream.new
if !nodeInventory["items"].nil?
@@ -83,6 +93,16 @@ def enumerate
record["Labels"] = [items["metadata"]["labels"]]
record["Status"] = ""
+ if !items["spec"]["providerID"].nil? && !items["spec"]["providerID"].empty?
+ if File.file?(@@AzStackCloudFileName) # existence of this file indicates agent running on azstack
+ record["KubernetesProviderID"] = "azurestack"
+ else
+ record["KubernetesProviderID"] = items["spec"]["providerID"]
+ end
+ else
+ record["KubernetesProviderID"] = "onprem"
+ end
+
# Refer to https://kubernetes.io/docs/concepts/architecture/nodes/#condition for possible node conditions.
# We check the status of each condition e.g. {"type": "OutOfDisk","status": "False"} . Based on this we
# populate the KubeNodeInventory Status field. A possible value for this field could be "Ready OutofDisk"
@@ -138,6 +158,9 @@ def enumerate
properties["KubeletVersion"] = record["KubeletVersion"]
properties["OperatingSystem"] = nodeInfo["operatingSystem"]
properties["DockerVersion"] = dockerVersion
+ properties["KubernetesProviderID"] = record["KubernetesProviderID"]
+ properties["KernelVersion"] = nodeInfo["kernelVersion"]
+ properties["OSImage"] = nodeInfo["osImage"]
capacityInfo = items["status"]["capacity"]
ApplicationInsightsUtility.sendMetricTelemetry("NodeMemory", capacityInfo["memory"], properties)
@@ -150,6 +173,7 @@ def enumerate
properties["rsPromServ"] = @@rsPromK8sServiceCount
properties["rsPromUrl"] = @@rsPromUrlCount
properties["rsPromMonPods"] = @@rsPromMonitorPods
+ properties["rsPromMonPodsNs"] = @@rsPromMonitorPodsNamespaceLength
end
ApplicationInsightsUtility.sendMetricTelemetry("NodeCoreCapacity", capacityInfo["cpu"], properties)
telemetrySent = true
diff --git a/source/code/plugin/in_kube_podinventory.rb b/source/code/plugin/in_kube_podinventory.rb
index f41ce9095..766831a66 100644
--- a/source/code/plugin/in_kube_podinventory.rb
+++ b/source/code/plugin/in_kube_podinventory.rb
@@ -48,13 +48,15 @@ def shutdown
end
def enumerate(podList = nil)
- if podList.nil?
- $log.info("in_kube_podinventory::enumerate : Getting pods from Kube API @ #{Time.now.utc.iso8601}")
- podInventory = JSON.parse(KubernetesApiClient.getKubeResourceInfo("pods").body)
- $log.info("in_kube_podinventory::enumerate : Done getting pods from Kube API @ #{Time.now.utc.iso8601}")
- else
- podInventory = podList
+ podInventory = podList
+ $log.info("in_kube_podinventory::enumerate : Getting pods from Kube API @ #{Time.now.utc.iso8601}")
+ podInfo = KubernetesApiClient.getKubeResourceInfo("pods")
+ $log.info("in_kube_podinventory::enumerate : Done getting pods from Kube API @ #{Time.now.utc.iso8601}")
+
+ if !podInfo.nil?
+ podInventory = JSON.parse(podInfo.body)
end
+
begin
if (!podInventory.empty? && podInventory.key?("items") && !podInventory["items"].empty?)
#get pod inventory & services
@@ -137,8 +139,16 @@ def getContainerEnvironmentVariables(pod, clusterCollectEnvironmentVar)
begin
podSpec = pod["spec"]
containerEnvHash = {}
- if !podSpec.nil? && !podSpec["containers"].nil?
- podSpec["containers"].each do |container|
+ podContainersEnv = []
+ if !podSpec["containers"].nil? && !podSpec["containers"].empty?
+ podContainersEnv = podContainersEnv + podSpec["containers"]
+ end
+ # Adding init containers to the record list as well.
+ if !podSpec["initContainers"].nil? && !podSpec["initContainers"].empty?
+ podContainersEnv = podContainersEnv + podSpec["initContainers"]
+ end
+ if !podContainersEnv.nil? && !podContainersEnv.empty?
+ podContainersEnv.each do |container|
if !clusterCollectEnvironmentVar.nil? && !clusterCollectEnvironmentVar.empty? && clusterCollectEnvironmentVar.casecmp("false") == 0
containerEnvHash[container["name"]] = ["AZMON_CLUSTER_COLLECT_ENV_VAR=FALSE"]
else
@@ -289,8 +299,19 @@ def parse_and_emit_records(podInventory, serviceList)
end
podRestartCount = 0
record["PodRestartCount"] = 0
- if items["status"].key?("containerStatuses") && !items["status"]["containerStatuses"].empty? #container status block start
- items["status"]["containerStatuses"].each do |container|
+
+ podContainers = []
+ if items["status"].key?("containerStatuses") && !items["status"]["containerStatuses"].empty?
+ podContainers = podContainers + items["status"]["containerStatuses"]
+ end
+ # Adding init containers to the record list as well.
+ if items["status"].key?("initContainerStatuses") && !items["status"]["initContainerStatuses"].empty?
+ podContainers = podContainers + items["status"]["initContainerStatuses"]
+ end
+
+ # if items["status"].key?("containerStatuses") && !items["status"]["containerStatuses"].empty? #container status block start
+ if !podContainers.empty? #container status block start
+ podContainers.each do |container|
containerRestartCount = 0
#container Id is of the form
#docker://dfd9da983f1fd27432fb2c1fe3049c0a1d25b1c697b2dc1a530c986e58b16527
diff --git a/source/code/plugin/in_kube_services.rb b/source/code/plugin/in_kube_services.rb
index 8b0a013e4..7cd703620 100644
--- a/source/code/plugin/in_kube_services.rb
+++ b/source/code/plugin/in_kube_services.rb
@@ -46,11 +46,19 @@ def enumerate
currentTime = Time.now
emitTime = currentTime.to_f
batchTime = currentTime.utc.iso8601
+
+ serviceList = nil
+
$log.info("in_kube_services::enumerate : Getting services from Kube API @ #{Time.now.utc.iso8601}")
- serviceList = JSON.parse(KubernetesApiClient.getKubeResourceInfo("services").body)
+ serviceInfo = KubernetesApiClient.getKubeResourceInfo("services")
$log.info("in_kube_services::enumerate : Done getting services from Kube API @ #{Time.now.utc.iso8601}")
+
+ if !serviceInfo.nil?
+ serviceList = JSON.parse(serviceInfo.body)
+ end
+
begin
- if (!serviceList.empty?)
+ if (!serviceList.nil? && !serviceList.empty?)
eventStream = MultiEventStream.new
serviceList["items"].each do |items|
record = {}
diff --git a/source/code/plugin/out_health_forward.rb b/source/code/plugin/out_health_forward.rb
new file mode 100644
index 000000000..18664a22a
--- /dev/null
+++ b/source/code/plugin/out_health_forward.rb
@@ -0,0 +1,677 @@
+#
+# Fluentd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'base64'
+require 'socket'
+require 'fileutils'
+
+require 'cool.io'
+
+require 'fluent/output'
+require 'fluent/config/error'
+
+module Fluent
+ class ForwardOutputError < StandardError
+ end
+
+ class ForwardOutputResponseError < ForwardOutputError
+ end
+
+ class ForwardOutputConnectionClosedError < ForwardOutputError
+ end
+
+ class ForwardOutputACKTimeoutError < ForwardOutputResponseError
+ end
+
+ class HealthForwardOutput < ObjectBufferedOutput
+ Plugin.register_output('health_forward', self)
+
+ def initialize
+ super
+ require 'fluent/plugin/socket_util'
+ @nodes = [] #=> [Node]
+ end
+
+ desc 'The timeout time when sending event logs.'
+ config_param :send_timeout, :time, default: 60
+ desc 'The transport protocol to use for heartbeats.(udp,tcp,none)'
+ config_param :heartbeat_type, default: :udp do |val|
+ case val.downcase
+ when 'tcp'
+ :tcp
+ when 'udp'
+ :udp
+ when 'none'
+ :none
+ else
+ raise ConfigError, "forward output heartbeat type should be 'tcp', 'udp', or 'none'"
+ end
+ end
+ desc 'The interval of the heartbeat packer.'
+ config_param :heartbeat_interval, :time, default: 1
+ desc 'The wait time before accepting a server fault recovery.'
+ config_param :recover_wait, :time, default: 10
+ desc 'The hard timeout used to detect server failure.'
+ config_param :hard_timeout, :time, default: 60
+ desc 'Set TTL to expire DNS cache in seconds.'
+ config_param :expire_dns_cache, :time, default: nil # 0 means disable cache
+ desc 'The threshold parameter used to detect server faults.'
+ config_param :phi_threshold, :integer, default: 16
+ desc 'Use the "Phi accrual failure detector" to detect server failure.'
+ config_param :phi_failure_detector, :bool, default: true
+
+ # if any options added that requires extended forward api, fix @extend_internal_protocol
+
+ desc 'Change the protocol to at-least-once.'
+ config_param :require_ack_response, :bool, default: false # require in_forward to respond with ack
+ desc 'This option is used when require_ack_response is true.'
+ config_param :ack_response_timeout, :time, default: 190 # 0 means do not wait for ack responses
+ # Linux default tcp_syn_retries is 5 (in many environment)
+ # 3 + 6 + 12 + 24 + 48 + 96 -> 189 (sec)
+ desc 'Enable client-side DNS round robin.'
+ config_param :dns_round_robin, :bool, default: false # heartbeat_type 'udp' is not available for this
+
+ attr_reader :nodes
+
+ config_param :port, :integer, default: DEFAULT_LISTEN_PORT, deprecated: "User host xxx instead."
+ config_param :host, :string, default: nil, deprecated: "Use port xxx instead."
+ desc 'Skip network related error, e.g. DNS error, during plugin setup'
+ config_param :skip_network_error_at_init, :bool, :default => false
+
+
+ attr_accessor :extend_internal_protocol
+
+ def configure(conf)
+ super
+
+ # backward compatibility
+ if host = conf['host']
+ port = conf['port']
+ port = port ? port.to_i : DEFAULT_LISTEN_PORT
+ e = conf.add_element('server')
+ e['host'] = host
+ e['port'] = port.to_s
+ end
+
+ recover_sample_size = @recover_wait / @heartbeat_interval
+
+    # add options here if any options added which use extended protocol
+ @extend_internal_protocol = if @require_ack_response
+ true
+ else
+ false
+ end
+
+ if @dns_round_robin
+ if @heartbeat_type == :udp
+ raise ConfigError, "forward output heartbeat type must be 'tcp' or 'none' to use dns_round_robin option"
+ end
+ end
+
+ conf.elements.each {|e|
+ next if e.name != "server"
+
+ host = e['host']
+ port = e['port']
+ port = port ? port.to_i : DEFAULT_LISTEN_PORT
+
+ weight = e['weight']
+ weight = weight ? weight.to_i : 60
+
+ standby = !!e['standby']
+
+ name = e['name']
+ unless name
+ name = "#{host}:#{port}"
+ end
+
+ failure = FailureDetector.new(@heartbeat_interval, @hard_timeout, Time.now.to_i.to_f)
+
+ node_conf = NodeConfig2.new(name, host, port, weight, standby, failure,
+ @phi_threshold, recover_sample_size, @expire_dns_cache, @phi_failure_detector, @dns_round_robin, @skip_network_error_at_init)
+
+ if @heartbeat_type == :none
+ @nodes << NoneHeartbeatNode.new(log, node_conf)
+ else
+ @nodes << Node.new(log, node_conf)
+ end
+ log.info "adding forwarding server '#{name}'", host: host, port: port, weight: weight, plugin_id: plugin_id
+ }
+
+ if @nodes.empty?
+ raise ConfigError, "forward output plugin requires at least one is required"
+ end
+ end
+
+ def start
+ super
+
+ @rand_seed = Random.new.seed
+ rebuild_weight_array
+ @rr = 0
+
+ unless @heartbeat_type == :none
+ @loop = Coolio::Loop.new
+
+ if @heartbeat_type == :udp
+ # assuming all hosts use udp
+ @usock = SocketUtil.create_udp_socket(@nodes.first.host)
+ @usock.fcntl(Fcntl::F_SETFL, Fcntl::O_NONBLOCK)
+ @hb = HeartbeatHandler.new(@usock, method(:on_heartbeat))
+ @loop.attach(@hb)
+ end
+
+ @timer = HeartbeatRequestTimer.new(@heartbeat_interval, method(:on_timer))
+ @loop.attach(@timer)
+
+ @thread = Thread.new(&method(:run))
+ end
+ end
+
+ def shutdown
+ @finished = true
+ if @loop
+ @loop.watchers.each {|w| w.detach }
+ @loop.stop
+ end
+ @thread.join if @thread
+ @usock.close if @usock
+ end
+
+ def run
+ @loop.run if @loop
+ rescue
+ log.error "unexpected error", error: $!.to_s
+ log.error_backtrace
+ end
+
+ def write_objects(tag, chunk)
+ return if chunk.empty?
+
+ error = nil
+
+ wlen = @weight_array.length
+ wlen.times do
+ @rr = (@rr + 1) % wlen
+ node = @weight_array[@rr]
+
+ if node.available?
+ begin
+ send_data(node, tag, chunk)
+ return
+ rescue
+ # for load balancing during detecting crashed servers
+ error = $! # use the latest error
+ end
+ end
+ end
+
+ if error
+ raise error
+ else
+ raise "no nodes are available" # TODO message
+ end
+ end
+
+ private
+
+ def rebuild_weight_array
+ standby_nodes, regular_nodes = @nodes.partition {|n|
+ n.standby?
+ }
+
+ lost_weight = 0
+ regular_nodes.each {|n|
+ unless n.available?
+ lost_weight += n.weight
+ end
+ }
+ log.debug "rebuilding weight array", lost_weight: lost_weight
+
+ if lost_weight > 0
+ standby_nodes.each {|n|
+ if n.available?
+ regular_nodes << n
+ log.warn "using standby node #{n.host}:#{n.port}", weight: n.weight
+ lost_weight -= n.weight
+ break if lost_weight <= 0
+ end
+ }
+ end
+
+ weight_array = []
+ gcd = regular_nodes.map {|n| n.weight }.inject(0) {|r,w| r.gcd(w) }
+ regular_nodes.each {|n|
+ (n.weight / gcd).times {
+ weight_array << n
+ }
+ }
+
+ # for load balancing during detecting crashed servers
+ coe = (regular_nodes.size * 6) / weight_array.size
+ weight_array *= coe if coe > 1
+
+ r = Random.new(@rand_seed)
+ weight_array.sort_by! { r.rand }
+
+ @weight_array = weight_array
+ end
+
+ # MessagePack FixArray length = 3 (if @extend_internal_protocol)
+ # = 2 (else)
+ FORWARD_HEADER = [0x92].pack('C').freeze
+ FORWARD_HEADER_EXT = [0x93].pack('C').freeze
+ def forward_header
+ if @extend_internal_protocol
+ FORWARD_HEADER_EXT
+ else
+ FORWARD_HEADER
+ end
+ end
+
+ #FORWARD_TCP_HEARTBEAT_DATA = FORWARD_HEADER + ''.to_msgpack + [].to_msgpack
+ def send_heartbeat_tcp(node)
+ sock = connect(node)
+ begin
+ opt = [1, @send_timeout.to_i].pack('I!I!') # { int l_onoff; int l_linger; }
+ sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_LINGER, opt)
+ opt = [@send_timeout.to_i, 0].pack('L!L!') # struct timeval
+ # don't send any data to not cause a compatibility problem
+ #sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, opt)
+ #sock.write FORWARD_TCP_HEARTBEAT_DATA
+ node.heartbeat(true)
+ ensure
+ sock.close
+ end
+ end
+
+ def send_data(node, tag, chunk)
+ sock = connect(node)
+ begin
+ opt = [1, @send_timeout.to_i].pack('I!I!') # { int l_onoff; int l_linger; }
+ sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_LINGER, opt)
+
+ opt = [@send_timeout.to_i, 0].pack('L!L!') # struct timeval
+ sock.setsockopt(Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, opt)
+
+ # beginArray(2)
+ sock.write forward_header
+
+ # writeRaw(tag)
+ sock.write tag.to_msgpack # tag
+
+ # beginRaw(size)
+ sz = chunk.size
+ #if sz < 32
+ # # FixRaw
+ # sock.write [0xa0 | sz].pack('C')
+ #elsif sz < 65536
+ # # raw 16
+ # sock.write [0xda, sz].pack('Cn')
+ #else
+ # raw 32
+ sock.write [0xdb, sz].pack('CN')
+ #end
+
+ # writeRawBody(packed_es)
+ chunk.write_to(sock)
+
+ if @extend_internal_protocol
+ option = {}
+ option['chunk'] = Base64.encode64(chunk.unique_id) if @require_ack_response
+ sock.write option.to_msgpack
+
+ if @require_ack_response && @ack_response_timeout > 0
+ # Waiting for a response here results in a decrease of throughput because a chunk queue is locked.
+          # To avoid a decrease of throughput, it is necessary to prepare a list of chunks that wait for responses
+ # and process them asynchronously.
+ if IO.select([sock], nil, nil, @ack_response_timeout)
+ raw_data = sock.recv(1024)
+
+ # When connection is closed by remote host, socket is ready to read and #recv returns an empty string that means EOF.
+ # If this happens we assume the data wasn't delivered and retry it.
+ if raw_data.empty?
+ @log.warn "node #{node.host}:#{node.port} closed the connection. regard it as unavailable."
+ node.disable!
+ raise ForwardOutputConnectionClosedError, "node #{node.host}:#{node.port} closed connection"
+ else
+ # Serialization type of the response is same as sent data.
+ res = MessagePack.unpack(raw_data)
+
+ if res['ack'] != option['chunk']
+                # Some errors may have occurred when ack and chunk id are different, so send the chunk again.
+ raise ForwardOutputResponseError, "ack in response and chunk id in sent data are different"
+ end
+ end
+
+ else
+ # IO.select returns nil on timeout.
+ # There are 2 types of cases when no response has been received:
+ # (1) the node does not support sending responses
+ # (2) the node does support sending response but responses have not arrived for some reasons.
+ @log.warn "no response from #{node.host}:#{node.port}. regard it as unavailable."
+ node.disable!
+ raise ForwardOutputACKTimeoutError, "node #{node.host}:#{node.port} does not return ACK"
+ end
+ end
+ end
+
+ node.heartbeat(false)
+ return res # for test
+ ensure
+ sock.close
+ end
+ end
+
+ def connect(node)
+ # TODO unix socket?
+ TCPSocket.new(node.resolved_host, node.port)
+ end
+
+ class HeartbeatRequestTimer < Coolio::TimerWatcher
+ def initialize(interval, callback)
+ super(interval, true)
+ @callback = callback
+ end
+
+ def on_timer
+ @callback.call
+ rescue
+ # TODO log?
+ end
+ end
+
+ def on_timer
+ return if @finished
+ @nodes.each {|n|
+ if n.tick
+ rebuild_weight_array
+ end
+ begin
+ #log.trace "sending heartbeat #{n.host}:#{n.port} on #{@heartbeat_type}"
+ if @heartbeat_type == :tcp
+ send_heartbeat_tcp(n)
+ else
+ @usock.send "\0", 0, Socket.pack_sockaddr_in(n.port, n.resolved_host)
+ end
+ rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR, Errno::ECONNREFUSED
+ # TODO log
+ log.debug "failed to send heartbeat packet to #{n.host}:#{n.port}", error: $!.to_s
+ end
+ }
+ end
+
+ class HeartbeatHandler < Coolio::IO
+ def initialize(io, callback)
+ super(io)
+ @io = io
+ @callback = callback
+ end
+
+ def on_readable
+ begin
+ msg, addr = @io.recvfrom(1024)
+ rescue Errno::EAGAIN, Errno::EWOULDBLOCK, Errno::EINTR
+ return
+ end
+ host = addr[3]
+ port = addr[1]
+ sockaddr = Socket.pack_sockaddr_in(port, host)
+ @callback.call(sockaddr, msg)
+ rescue
+ # TODO log?
+ end
+ end
+
+ def on_heartbeat(sockaddr, msg)
+ port, host = Socket.unpack_sockaddr_in(sockaddr)
+ if node = @nodes.find {|n| n.sockaddr == sockaddr }
+ #log.trace "heartbeat from '#{node.name}'", :host=>node.host, :port=>node.port
+ if node.heartbeat
+ rebuild_weight_array
+ end
+ end
+ end
+
+ NodeConfig2 = Struct.new("NodeConfig2", :name, :host, :port, :weight, :standby, :failure,
+ :phi_threshold, :recover_sample_size, :expire_dns_cache, :phi_failure_detector, :dns_round_robin, :skip_network_error)
+
+ class Node
+ def initialize(log, conf)
+ @log = log
+ @conf = conf
+ @name = @conf.name
+ @host = @conf.host
+ @port = @conf.port
+ @weight = @conf.weight
+ @failure = @conf.failure
+ @available = true
+
+ @resolved_host = nil
+ @resolved_time = 0
+ begin
+ resolved_host # check dns
+ rescue => e
+ if @conf.skip_network_error
+ log.warn "#{@name} got network error during setup. Resolve host later", :error => e, :error_class => e.class
+ else
+ raise
+ end
+ end
+ end
+
+ attr_reader :conf
+ attr_reader :name, :host, :port, :weight
+ attr_reader :sockaddr # used by on_heartbeat
+ attr_reader :failure, :available # for test
+
+ def available?
+ @available
+ end
+
+ def disable!
+ @available = false
+ end
+
+ def standby?
+ @conf.standby
+ end
+
+ def resolved_host
+ case @conf.expire_dns_cache
+ when 0
+ # cache is disabled
+ return resolve_dns!
+
+ when nil
+ # persistent cache
+ return @resolved_host ||= resolve_dns!
+
+ else
+ now = Engine.now
+ rh = @resolved_host
+ if !rh || now - @resolved_time >= @conf.expire_dns_cache
+ rh = @resolved_host = resolve_dns!
+ @resolved_time = now
+ end
+ return rh
+ end
+ end
+
+ def resolve_dns!
+ addrinfo_list = Socket.getaddrinfo(@host, @port, nil, Socket::SOCK_STREAM)
+ addrinfo = @conf.dns_round_robin ? addrinfo_list.sample : addrinfo_list.first
+ @sockaddr = Socket.pack_sockaddr_in(addrinfo[1], addrinfo[3]) # used by on_heartbeat
+ addrinfo[3]
+ end
+ private :resolve_dns!
+
+ def tick
+ now = Time.now.to_f
+ if !@available
+ if @failure.hard_timeout?(now)
+ @failure.clear
+ end
+ return nil
+ end
+
+ if @failure.hard_timeout?(now)
+ @log.warn "detached forwarding server '#{@name}'", host: @host, port: @port, hard_timeout: true
+ @available = false
+ @resolved_host = nil # expire cached host
+ @failure.clear
+ return true
+ end
+
+ if @conf.phi_failure_detector
+ phi = @failure.phi(now)
+ #$log.trace "phi '#{@name}'", :host=>@host, :port=>@port, :phi=>phi
+ if phi > @conf.phi_threshold
+ @log.warn "detached forwarding server '#{@name}'", host: @host, port: @port, phi: phi
+ @available = false
+ @resolved_host = nil # expire cached host
+ @failure.clear
+ return true
+ end
+ end
+ return false
+ end
+
+ def heartbeat(detect=true)
+ now = Time.now.to_f
+ @failure.add(now)
+ #@log.trace "heartbeat from '#{@name}'", :host=>@host, :port=>@port, :available=>@available, :sample_size=>@failure.sample_size
+ if detect && !@available && @failure.sample_size > @conf.recover_sample_size
+ @available = true
+ @log.warn "recovered forwarding server '#{@name}'", host: @host, port: @port
+ return true
+ else
+ return nil
+ end
+ end
+
+ def to_msgpack(out = '')
+ [@host, @port, @weight, @available].to_msgpack(out)
+ end
+ end
+
+ # Override Node to disable heartbeat
+ class NoneHeartbeatNode < Node
+ def available?
+ true
+ end
+
+ def tick
+ false
+ end
+
+ def heartbeat(detect=true)
+ true
+ end
+ end
+
+ class FailureDetector
+ PHI_FACTOR = 1.0 / Math.log(10.0)
+ SAMPLE_SIZE = 1000
+
+ def initialize(heartbeat_interval, hard_timeout, init_last)
+ @heartbeat_interval = heartbeat_interval
+ @last = init_last
+ @hard_timeout = hard_timeout
+
+ # microsec
+ @init_gap = (heartbeat_interval * 1e6).to_i
+ @window = [@init_gap]
+ end
+
+ def hard_timeout?(now)
+ now - @last > @hard_timeout
+ end
+
+ def add(now)
+ if @window.empty?
+ @window << @init_gap
+ @last = now
+ else
+ gap = now - @last
+ @window << (gap * 1e6).to_i
+ @window.shift if @window.length > SAMPLE_SIZE
+ @last = now
+ end
+ end
+
+ def phi(now)
+ size = @window.size
+ return 0.0 if size == 0
+
+ # Calculate weighted moving average
+ mean_usec = 0
+ fact = 0
+ @window.each_with_index {|gap,i|
+ mean_usec += gap * (1+i)
+ fact += (1+i)
+ }
+ mean_usec = mean_usec / fact
+
+ # Normalize arrive intervals into 1sec
+ mean = (mean_usec.to_f / 1e6) - @heartbeat_interval + 1
+
+ # Calculate phi of the phi accrual failure detector
+ t = now - @last - @heartbeat_interval + 1
+ phi = PHI_FACTOR * t / mean
+
+ return phi
+ end
+
+ def sample_size
+ @window.size
+ end
+
+ def clear
+ @window.clear
+ @last = 0
+ end
+ end
+
+ ## TODO
+ #class RPC
+ # def initialize(this)
+ # @this = this
+ # end
+ #
+ # def list_nodes
+ # @this.nodes
+ # end
+ #
+ # def list_fault_nodes
+ # list_nodes.select {|n| !n.available? }
+ # end
+ #
+ # def list_available_nodes
+ # list_nodes.select {|n| n.available? }
+ # end
+ #
+ # def add_node(name, host, port, weight)
+ # end
+ #
+ # def recover_node(host, port)
+ # end
+ #
+ # def remove_node(host, port)
+ # end
+ #end
+ end
+end
diff --git a/source/code/plugin/out_mdm.rb b/source/code/plugin/out_mdm.rb
index 4b9d50a29..b8d10090d 100644
--- a/source/code/plugin/out_mdm.rb
+++ b/source/code/plugin/out_mdm.rb
@@ -55,8 +55,9 @@ def start
if aks_region.to_s.empty?
@log.info "Environment Variable AKS_REGION is not set.. "
@can_send_data_to_mdm = false
+ else
+ aks_region = aks_region.gsub(" ","")
end
- aks_region = aks_region.gsub(" ","")
if @can_send_data_to_mdm
@log.info "MDM Metrics supported in #{aks_region} region"
diff --git a/test/code/plugin/health/cadvisor_perf.json b/test/code/plugin/health/cadvisor_perf.json
new file mode 100644
index 000000000..35eae32b6
--- /dev/null
+++ b/test/code/plugin/health/cadvisor_perf.json
@@ -0,0 +1,2540 @@
+[
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:39Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 14061568
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:44Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/dnsmasq",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 7249920
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:45Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/kubedns",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 14442496
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:49Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/healthz",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 5988352
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:43Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/f65e6a62-c5c8-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 40284160
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:41Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/69e68b21-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 101965824
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:37Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 3203072
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:42Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 9658368
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:42Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9543dbb7-a1f2-11e9-8b08-d602e29755d5/metrics-server",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 21491712
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:50Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639906
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:50Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/dnsmasq",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639899
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:50Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/kubedns",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639895
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:50Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/healthz",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639903
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:50Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/f65e6a62-c5c8-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566580259
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:50Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/69e68b21-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566589936
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:50Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1563224142
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:50Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1563224144
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:50Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9543dbb7-a1f2-11e9-8b08-d602e29755d5/metrics-server",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639893
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:39Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 349987
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:44Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/dnsmasq",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 773186
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:45Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/kubedns",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 2718196
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:49Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/healthz",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 2007695
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:43Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/f65e6a62-c5c8-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 674463
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:41Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/69e68b21-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 2159553
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:37Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 3575667
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:42Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 0
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:42Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9543dbb7-a1f2-11e9-8b08-d602e29755d5/metrics-server",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 633968
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:39Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 11546624
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:39Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 11546624
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:44Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/dnsmasq",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 5652480
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:45Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/kubedns",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 10981376
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:49Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/healthz",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 2875392
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:43Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/f65e6a62-c5c8-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 20627456
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:41Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/69e68b21-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 69353472
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:37Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 462848
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:42Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/e690309f-a742-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 8212480
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:42Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9543dbb7-a1f2-11e9-8b08-d602e29755d5/metrics-server",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 16543744
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:45Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-1",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 814518272
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:45Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-1",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 82091339.40983607
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:45Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-1",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 2089115648
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:50Z",
+ "Host": "aks-nodepool1-19574989-1",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-1",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1552408751.22
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:56Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b1e04e1c-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 85528576
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:54Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/49e373c8-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 25415680
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:53Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/65a6f978-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 111738880
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:55Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster-nanny",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 8417280
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:01Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 19492864
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:57Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9583b2ab-a1f2-11e9-8b08-d602e29755d5/main",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 12918784
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:46Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 3379200
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:57Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 9818112
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:03Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b1e04e1c-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566590024
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:03Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/49e373c8-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566580398
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:03Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/65a6f978-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566589942
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:03Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster-nanny",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566580342
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:03Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566580337
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:03Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9583b2ab-a1f2-11e9-8b08-d602e29755d5/main",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639936
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:03Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1563224072
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:03Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1563224077
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:56Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b1e04e1c-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 4447595
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:54Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/49e373c8-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 2765529
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:53Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/65a6f978-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 5565414
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:55Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster-nanny",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 863810
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:01Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 886196
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:57Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9583b2ab-a1f2-11e9-8b08-d602e29755d5/main",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 855014
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:46Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 1794634
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:57Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 0
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:56Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b1e04e1c-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 76308480
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:54Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/49e373c8-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 21319680
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:53Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/65a6f978-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 78180352
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:55Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster-nanny",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 7909376
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:01Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/24ab7e32-c5c9-11e9-8736-86290fd7dd1f/heapster",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 18968576
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:57Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/9583b2ab-a1f2-11e9-8b08-d602e29755d5/main",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 9871360
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:46Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 462848
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:57Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/bb3d3ef2-a742-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 8212480
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:57Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-0",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 865943552
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:57Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-0",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 95432166.25
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:12:57Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-0",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 2191216640
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:03Z",
+ "Host": "aks-nodepool1-19574989-0",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-0",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1552408749.66
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:07Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b2a0e1b3-bd3f-11e9-b2a7-d61658c73830/tunnel-front",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 17743872
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:12Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/114f7246-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 24162304
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:07Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 11472896
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:06Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/redirector",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 3821568
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:15Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/8dbd5e8b-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 92057600
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:15Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b2a0e1b3-bd3f-11e9-b2a7-d61658c73830/tunnel-front",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1565641691
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:15Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/114f7246-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566580300
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:15Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1565204288
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:15Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/redirector",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1565204284
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:15Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/8dbd5e8b-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566589995
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:07Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b2a0e1b3-bd3f-11e9-b2a7-d61658c73830/tunnel-front",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 35140951
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:12Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/114f7246-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 983407
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:07Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 0
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:06Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/redirector",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 4221562
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:15Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/8dbd5e8b-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 1881274
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:07Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b2a0e1b3-bd3f-11e9-b2a7-d61658c73830/tunnel-front",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 4161536
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:12Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/114f7246-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 18952192
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:07Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 8224768
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:06Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/44a61692-b945-11e9-a1b6-127094e7fd94/redirector",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 483328
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:15Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/8dbd5e8b-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 74915840
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:14Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-3",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 554704896
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:14Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-3",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 88981130.86666666
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:14Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-3",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 1633976320
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:14:15Z",
+ "Host": "aks-nodepool1-19574989-3",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-3",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1565204130.6
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:37Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/be78d7f6-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 92954624
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:33Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/dnsmasq",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 7446528
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:22Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 14811136
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:31Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/kubedns",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 15114240
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:35Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/healthz",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 5406720
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:32Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/94e52ab1-a1f2-11e9-8b08-d602e29755d5/autoscaler",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 10043392
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:37Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/2c3de48d-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 58052608
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:31Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 9904128
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:31Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 3645440
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:40Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/be78d7f6-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566590079
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:40Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/dnsmasq",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639920
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:40Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639940
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:40Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/kubedns",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639904
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:40Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/healthz",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639932
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:40Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/94e52ab1-a1f2-11e9-8b08-d602e29755d5/autoscaler",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1562639909
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:40Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/2c3de48d-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1566580349
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:40Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1563224204
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:40Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1563224199
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:37Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/be78d7f6-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 3004849
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:33Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/dnsmasq",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 796842
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:22Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 708906
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:31Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/kubedns",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 3451625
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:35Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/healthz",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 2572419
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:32Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/94e52ab1-a1f2-11e9-8b08-d602e29755d5/autoscaler",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 548275
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:37Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/2c3de48d-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 1740316
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:31Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 0
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:31Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 3156661
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:37Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/be78d7f6-c5df-11e9-8736-86290fd7dd1f/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 66428928
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:33Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/dnsmasq",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 5611520
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:22Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 11833344
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:31Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/kubedns",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 11063296
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:35Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/95046bc6-a1f2-11e9-8b08-d602e29755d5/healthz",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 2551808
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:32Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/94e52ab1-a1f2-11e9-8b08-d602e29755d5/autoscaler",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 9244672
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:37Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/2c3de48d-c5c9-11e9-8736-86290fd7dd1f/kube-proxy",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 20402176
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:31Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/azureproxy",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 8216576
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:31Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/06fef5f6-a743-11e9-a38a-22d1c75c4357/redirector",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 462848
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:30Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-2",
+ "Collections": [
+ {
+ "CounterName": "memoryRssBytes",
+ "Value": 853344256
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:30Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-2",
+ "Collections": [
+ {
+ "CounterName": "cpuUsageNanoCores",
+ "Value": 114265842.16
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:30Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-2",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 1892982784
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ },
+ {
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-23T22:13:40Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SNode",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/aks-nodepool1-19574989-2",
+ "Collections": [
+ {
+ "CounterName": "restartTimeEpoch",
+ "Value": 1561082409.36
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ }
+]
\ No newline at end of file
diff --git a/test/code/plugin/health/deployments.json b/test/code/plugin/health/deployments.json
new file mode 100644
index 000000000..75586db04
--- /dev/null
+++ b/test/code/plugin/health/deployments.json
@@ -0,0 +1,1385 @@
+{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "Deployment",
+ "metadata": {
+ "annotations": {
+ "deployment.kubernetes.io/revision": "2"
+ },
+ "creationTimestamp": "2019-08-23T17:12:00Z",
+ "generation": 2,
+ "labels": {
+ "addonmanager.kubernetes.io/mode": "EnsureExists",
+ "k8s-app": "heapster",
+ "kubernetes.io/cluster-service": "true"
+ },
+ "name": "heapster",
+ "namespace": "kube-system",
+ "resourceVersion": "19048928",
+ "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/heapster",
+ "uid": "1e98c3d1-c5c9-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "progressDeadlineSeconds": 2147483647,
+ "replicas": 1,
+ "revisionHistoryLimit": 10,
+ "selector": {
+ "matchLabels": {
+ "k8s-app": "heapster"
+ }
+ },
+ "strategy": {
+ "rollingUpdate": {
+ "maxSurge": 1,
+ "maxUnavailable": 1
+ },
+ "type": "RollingUpdate"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "k8s-app": "heapster"
+ }
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "command": [
+ "/heapster",
+ "--source=kubernetes.summary_api:\"\""
+ ],
+ "image": "aksrepos.azurecr.io/mirror/heapster-amd64:v1.5.3",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 3,
+ "httpGet": {
+ "path": "/healthz",
+ "port": 8082,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 180,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "heapster",
+ "resources": {
+ "limits": {
+ "cpu": "88m",
+ "memory": "204Mi"
+ },
+ "requests": {
+ "cpu": "88m",
+ "memory": "204Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File"
+ },
+ {
+ "command": [
+ "/pod_nanny",
+ "--config-dir=/etc/config",
+ "--cpu=80m",
+ "--extra-cpu=0.5m",
+ "--memory=140Mi",
+ "--extra-memory=4Mi",
+ "--threshold=5",
+ "--deployment=heapster",
+ "--container=heapster",
+ "--poll-period=300000",
+ "--estimator=exponential"
+ ],
+ "env": [
+ {
+ "name": "MY_POD_NAME",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "metadata.name"
+ }
+ }
+ },
+ {
+ "name": "MY_POD_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/addon-resizer:1.8.1",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "heapster-nanny",
+ "resources": {
+ "limits": {
+ "cpu": "50m",
+ "memory": "90Mi"
+ },
+ "requests": {
+ "cpu": "50m",
+ "memory": "90Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/etc/config",
+ "name": "heapster-config-volume"
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "heapster",
+ "serviceAccountName": "heapster",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "heapster-config"
+ },
+ "name": "heapster-config-volume"
+ }
+ ]
+ }
+ }
+ },
+ "status": {
+ "availableReplicas": 1,
+ "conditions": [
+ {
+ "lastTransitionTime": "2019-08-23T17:12:00Z",
+ "lastUpdateTime": "2019-08-23T17:12:00Z",
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ }
+ ],
+ "observedGeneration": 2,
+ "readyReplicas": 1,
+ "replicas": 1,
+ "updatedReplicas": 1
+ }
+ },
+ {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "Deployment",
+ "metadata": {
+ "annotations": {
+ "deployment.kubernetes.io/revision": "5",
+ "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"k8s-app\":\"kube-dns-autoscaler\",\"kubernetes.io/cluster-service\":\"true\"},\"name\":\"kube-dns-autoscaler\",\"namespace\":\"kube-system\"},\"spec\":{\"selector\":{\"matchLabels\":{\"k8s-app\":\"kube-dns-autoscaler\"}},\"template\":{\"metadata\":{\"annotations\":{\"scheduler.alpha.kubernetes.io/critical-pod\":\"\",\"seccomp.security.alpha.kubernetes.io/pod\":\"docker/default\"},\"labels\":{\"k8s-app\":\"kube-dns-autoscaler\"}},\"spec\":{\"containers\":[{\"command\":[\"/cluster-proportional-autoscaler\",\"--namespace=kube-system\",\"--configmap=kube-dns-autoscaler\",\"--target=deployment/kube-dns-v20\",\"--default-params={\\\"ladder\\\":{\\\"coresToReplicas\\\":[[1,2],[512,3],[1024,4],[2048,5]],\\\"nodesToReplicas\\\":[[1,2],[8,3],[16,4],[32,5]]}}\",\"--logtostderr=true\",\"--v=2\"],\"image\":\"aksrepos.azurecr.io/mirror/cluster-proportional-autoscaler-amd64:1.1.2-r2\",\"name\":\"autoscaler\",\"resources\":{\"requests\":{\"cpu\":\"20m\",\"memory\":\"10Mi\"}}}],\"dnsPolicy\":\"Default\",\"imagePullSecrets\":[{\"name\":\"emptyacrsecret\"}],\"priorityClassName\":\"system-node-critical\",\"serviceAccountName\":\"kube-dns-autoscaler\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}]}}}}\n"
+ },
+ "creationTimestamp": "2019-03-12T16:38:30Z",
+ "generation": 5,
+ "labels": {
+ "addonmanager.kubernetes.io/mode": "Reconcile",
+ "k8s-app": "kube-dns-autoscaler",
+ "kubernetes.io/cluster-service": "true"
+ },
+ "name": "kube-dns-autoscaler",
+ "namespace": "kube-system",
+ "resourceVersion": "15144046",
+ "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/kube-dns-autoscaler",
+ "uid": "4509acaf-44e5-11e9-9920-423525a6b683"
+ },
+ "spec": {
+ "progressDeadlineSeconds": 2147483647,
+ "replicas": 1,
+ "revisionHistoryLimit": 10,
+ "selector": {
+ "matchLabels": {
+ "k8s-app": "kube-dns-autoscaler"
+ }
+ },
+ "strategy": {
+ "rollingUpdate": {
+ "maxSurge": 1,
+ "maxUnavailable": 1
+ },
+ "type": "RollingUpdate"
+ },
+ "template": {
+ "metadata": {
+ "annotations": {
+ "scheduler.alpha.kubernetes.io/critical-pod": "",
+ "seccomp.security.alpha.kubernetes.io/pod": "docker/default"
+ },
+ "creationTimestamp": null,
+ "labels": {
+ "k8s-app": "kube-dns-autoscaler"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "command": [
+ "/cluster-proportional-autoscaler",
+ "--namespace=kube-system",
+ "--configmap=kube-dns-autoscaler",
+ "--target=deployment/kube-dns-v20",
+ "--default-params={\"ladder\":{\"coresToReplicas\":[[1,2],[512,3],[1024,4],[2048,5]],\"nodesToReplicas\":[[1,2],[8,3],[16,4],[32,5]]}}",
+ "--logtostderr=true",
+ "--v=2"
+ ],
+ "image": "aksrepos.azurecr.io/mirror/cluster-proportional-autoscaler-amd64:1.1.2-r2",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "autoscaler",
+ "resources": {
+ "requests": {
+ "cpu": "20m",
+ "memory": "10Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File"
+ }
+ ],
+ "dnsPolicy": "Default",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-dns-autoscaler",
+ "serviceAccountName": "kube-dns-autoscaler",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ }
+ ]
+ }
+ }
+ },
+ "status": {
+ "availableReplicas": 1,
+ "conditions": [
+ {
+ "lastTransitionTime": "2019-03-12T16:38:30Z",
+ "lastUpdateTime": "2019-03-12T16:38:30Z",
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ }
+ ],
+ "observedGeneration": 5,
+ "readyReplicas": 1,
+ "replicas": 1,
+ "updatedReplicas": 1
+ }
+ },
+ {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "Deployment",
+ "metadata": {
+ "annotations": {
+ "deployment.kubernetes.io/revision": "6",
+ "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"k8s-app\":\"kube-dns\",\"kubernetes.io/cluster-service\":\"true\",\"version\":\"v20\"},\"name\":\"kube-dns-v20\",\"namespace\":\"kube-system\"},\"spec\":{\"selector\":{\"matchLabels\":{\"k8s-app\":\"kube-dns\",\"version\":\"v20\"}},\"template\":{\"metadata\":{\"annotations\":{\"prometheus.io/port\":\"10055\",\"prometheus.io/scrape\":\"true\"},\"labels\":{\"k8s-app\":\"kube-dns\",\"kubernetes.io/cluster-service\":\"true\",\"version\":\"v20\"}},\"spec\":{\"affinity\":{\"nodeAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":{\"nodeSelectorTerms\":[{\"labelSelector\":null,\"matchExpressions\":[{\"key\":\"kubernetes.azure.com/cluster\",\"operator\":\"Exists\"}]}]}},\"podAntiAffinity\":{\"preferredDuringSchedulingIgnoredDuringExecution\":[{\"podAffinityTerm\":{\"labelSelector\":{\"matchExpressions\":[{\"key\":\"k8s-app\",\"operator\":\"In\",\"values\":[\"kube-dns\"]}]},\"topologyKey\":\"kubernetes.io/hostname\"},\"weight\":100}]}},\"containers\":[{\"args\":[\"--kubecfg-file=/config/kubeconfig\",\"--config-dir=/kube-dns-config\",\"--domain=cluster.local.\",\"--dns-port=10053\",\"--v=2\"],\"env\":[{\"name\":\"PROMETHEUS_PORT\",\"value\":\"10055\"}],\"image\":\"aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13\",\"livenessProbe\":{\"failureThreshold\":5,\"httpGet\":{\"path\":\"/healthcheck/kubedns\",\"port\":10054,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":60,\"successThreshold\":1,\"timeoutSeconds\":5},\"name\":\"kubedns\",\"ports\":[{\"containerPort\":10053,\"name\":\"dns-local\",\"protocol\":\"UDP\"},{\"containerPort\":10053,\"name\":\"dns-tcp-local\",\"protocol\":\"TCP\"},{\"containerPort\":10055,\"name\":\"metrics\",\"protocol\":\"TCP\"}],\"readinessProbe\":{\"httpGet\":{\"path\":\"/readiness\",\"port\":8081,\"scheme\":\"HTTP\"},\"initi
alDelaySeconds\":30,\"timeoutSeconds\":5},\"resources\":{\"limits\":{\"memory\":\"170Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"70Mi\"}},\"volumeMounts\":[{\"mountPath\":\"/kube-dns-config\",\"name\":\"kube-dns-config\"},{\"mountPath\":\"/config\",\"name\":\"kubedns-kubecfg\",\"readOnly\":true}]},{\"args\":[\"-v=2\",\"-logtostderr\",\"-configDir=/kube-dns-config\",\"-restartDnsmasq=true\",\"--\",\"-k\",\"--cache-size=1000\",\"--no-negcache\",\"--no-resolv\",\"--server=127.0.0.1#10053\",\"--server=/cluster.local/127.0.0.1#10053\",\"--server=/in-addr.arpa/127.0.0.1#10053\",\"--server=/ip6.arpa/127.0.0.1#10053\",\"--log-facility=-\"],\"image\":\"aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10\",\"name\":\"dnsmasq\",\"ports\":[{\"containerPort\":53,\"name\":\"dns\",\"protocol\":\"UDP\"},{\"containerPort\":53,\"name\":\"dns-tcp\",\"protocol\":\"TCP\"}],\"volumeMounts\":[{\"mountPath\":\"/kube-dns-config\",\"name\":\"kube-dns-config\"}]},{\"args\":[\"--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1 \\u003e/dev/null || exit 1; done\",\"--url=/healthz-dnsmasq\",\"--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1:10053 \\u003e/dev/null || exit 1; done\",\"--url=/healthz-kubedns\",\"--port=8080\",\"--quiet\"],\"env\":[{\"name\":\"PROBE_DOMAINS\",\"value\":\"bing.com 
kubernetes.default.svc.cluster.local\"}],\"image\":\"aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2\",\"livenessProbe\":{\"failureThreshold\":5,\"httpGet\":{\"path\":\"/healthz-dnsmasq\",\"port\":8080,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":60,\"successThreshold\":1,\"timeoutSeconds\":5},\"name\":\"healthz\",\"ports\":[{\"containerPort\":8080,\"protocol\":\"TCP\"}],\"resources\":{\"limits\":{\"memory\":\"50Mi\"},\"requests\":{\"cpu\":\"10m\",\"memory\":\"50Mi\"}}},{\"args\":[\"--v=2\",\"--logtostderr\",\"--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV\",\"--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV\"],\"image\":\"aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10\",\"livenessProbe\":{\"httpGet\":{\"path\":\"/metrics\",\"port\":10054,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":60,\"successThreshold\":1,\"timeoutSeconds\":5},\"name\":\"sidecar\",\"ports\":[{\"containerPort\":10054,\"name\":\"metrics\",\"protocol\":\"TCP\"}],\"resources\":{\"requests\":{\"cpu\":\"10m\",\"memory\":\"20Mi\"}}}],\"dnsPolicy\":\"Default\",\"imagePullSecrets\":[{\"name\":\"emptyacrsecret\"}],\"nodeSelector\":{\"beta.kubernetes.io/os\":\"linux\"},\"priorityClassName\":\"system-node-critical\",\"serviceAccountName\":\"kube-dns\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}],\"volumes\":[{\"configMap\":{\"name\":\"kube-dns\",\"optional\":true},\"name\":\"kube-dns-config\"},{\"configMap\":{\"name\":\"kubedns-kubecfg\"},\"name\":\"kubedns-kubecfg\"}]}}}}\n"
+ },
+ "creationTimestamp": "2019-03-12T16:38:30Z",
+ "generation": 7,
+ "labels": {
+ "addonmanager.kubernetes.io/mode": "Reconcile",
+ "k8s-app": "kube-dns",
+ "kubernetes.io/cluster-service": "true",
+ "version": "v20"
+ },
+ "name": "kube-dns-v20",
+ "namespace": "kube-system",
+ "resourceVersion": "15144054",
+ "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/kube-dns-v20",
+ "uid": "4523fcd7-44e5-11e9-9920-423525a6b683"
+ },
+ "spec": {
+ "progressDeadlineSeconds": 2147483647,
+ "replicas": 2,
+ "revisionHistoryLimit": 10,
+ "selector": {
+ "matchLabels": {
+ "k8s-app": "kube-dns",
+ "version": "v20"
+ }
+ },
+ "strategy": {
+ "rollingUpdate": {
+ "maxSurge": 1,
+ "maxUnavailable": 1
+ },
+ "type": "RollingUpdate"
+ },
+ "template": {
+ "metadata": {
+ "annotations": {
+ "prometheus.io/port": "10055",
+ "prometheus.io/scrape": "true"
+ },
+ "creationTimestamp": null,
+ "labels": {
+ "k8s-app": "kube-dns",
+ "kubernetes.io/cluster-service": "true",
+ "version": "v20"
+ }
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "podAntiAffinity": {
+ "preferredDuringSchedulingIgnoredDuringExecution": [
+ {
+ "podAffinityTerm": {
+ "labelSelector": {
+ "matchExpressions": [
+ {
+ "key": "k8s-app",
+ "operator": "In",
+ "values": [
+ "kube-dns"
+ ]
+ }
+ ]
+ },
+ "topologyKey": "kubernetes.io/hostname"
+ },
+ "weight": 100
+ }
+ ]
+ }
+ },
+ "containers": [
+ {
+ "args": [
+ "--kubecfg-file=/config/kubeconfig",
+ "--config-dir=/kube-dns-config",
+ "--domain=cluster.local.",
+ "--dns-port=10053",
+ "--v=2"
+ ],
+ "env": [
+ {
+ "name": "PROMETHEUS_PORT",
+ "value": "10055"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 5,
+ "httpGet": {
+ "path": "/healthcheck/kubedns",
+ "port": 10054,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 60,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "kubedns",
+ "ports": [
+ {
+ "containerPort": 10053,
+ "name": "dns-local",
+ "protocol": "UDP"
+ },
+ {
+ "containerPort": 10053,
+ "name": "dns-tcp-local",
+ "protocol": "TCP"
+ },
+ {
+ "containerPort": 10055,
+ "name": "metrics",
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "failureThreshold": 3,
+ "httpGet": {
+ "path": "/readiness",
+ "port": 8081,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 30,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "resources": {
+ "limits": {
+ "memory": "170Mi"
+ },
+ "requests": {
+ "cpu": "100m",
+ "memory": "70Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/kube-dns-config",
+ "name": "kube-dns-config"
+ },
+ {
+ "mountPath": "/config",
+ "name": "kubedns-kubecfg",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "args": [
+ "-v=2",
+ "-logtostderr",
+ "-configDir=/kube-dns-config",
+ "-restartDnsmasq=true",
+ "--",
+ "-k",
+ "--cache-size=1000",
+ "--no-negcache",
+ "--no-resolv",
+ "--server=127.0.0.1#10053",
+ "--server=/cluster.local/127.0.0.1#10053",
+ "--server=/in-addr.arpa/127.0.0.1#10053",
+ "--server=/ip6.arpa/127.0.0.1#10053",
+ "--log-facility=-"
+ ],
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "dnsmasq",
+ "ports": [
+ {
+ "containerPort": 53,
+ "name": "dns",
+ "protocol": "UDP"
+ },
+ {
+ "containerPort": 53,
+ "name": "dns-tcp",
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {},
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/kube-dns-config",
+ "name": "kube-dns-config"
+ }
+ ]
+ },
+ {
+ "args": [
+ "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1 \u003e/dev/null || exit 1; done",
+ "--url=/healthz-dnsmasq",
+ "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1:10053 \u003e/dev/null || exit 1; done",
+ "--url=/healthz-kubedns",
+ "--port=8080",
+ "--quiet"
+ ],
+ "env": [
+ {
+ "name": "PROBE_DOMAINS",
+ "value": "bing.com kubernetes.default.svc.cluster.local"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 5,
+ "httpGet": {
+ "path": "/healthz-dnsmasq",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 60,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "healthz",
+ "ports": [
+ {
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "50Mi"
+ },
+ "requests": {
+ "cpu": "10m",
+ "memory": "50Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File"
+ },
+ {
+ "args": [
+ "--v=2",
+ "--logtostderr",
+ "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV",
+ "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV"
+ ],
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 3,
+ "httpGet": {
+ "path": "/metrics",
+ "port": 10054,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 60,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "sidecar",
+ "ports": [
+ {
+ "containerPort": 10054,
+ "name": "metrics",
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "requests": {
+ "cpu": "10m",
+ "memory": "20Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File"
+ }
+ ],
+ "dnsPolicy": "Default",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-dns",
+ "serviceAccountName": "kube-dns",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "kube-dns",
+ "optional": true
+ },
+ "name": "kube-dns-config"
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "kubedns-kubecfg"
+ },
+ "name": "kubedns-kubecfg"
+ }
+ ]
+ }
+ }
+ },
+ "status": {
+ "availableReplicas": 2,
+ "conditions": [
+ {
+ "lastTransitionTime": "2019-07-23T14:46:03Z",
+ "lastUpdateTime": "2019-07-23T14:46:03Z",
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ }
+ ],
+ "observedGeneration": 7,
+ "readyReplicas": 2,
+ "replicas": 2,
+ "updatedReplicas": 2
+ }
+ },
+ {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "Deployment",
+ "metadata": {
+ "annotations": {
+ "deployment.kubernetes.io/revision": "6",
+ "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"k8s-app\":\"kubernetes-dashboard\",\"kubernetes.io/cluster-service\":\"true\"},\"name\":\"kubernetes-dashboard\",\"namespace\":\"kube-system\"},\"spec\":{\"replicas\":1,\"strategy\":{\"rollingUpdate\":{\"maxSurge\":0,\"maxUnavailable\":1},\"type\":\"RollingUpdate\"},\"template\":{\"metadata\":{\"labels\":{\"k8s-app\":\"kubernetes-dashboard\",\"kubernetes.io/cluster-service\":\"true\"}},\"spec\":{\"affinity\":{\"nodeAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":{\"nodeSelectorTerms\":[{\"labelSelector\":null,\"matchExpressions\":[{\"key\":\"kubernetes.azure.com/cluster\",\"operator\":\"Exists\"}]}]}}},\"containers\":[{\"image\":\"aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64:v1.10.1\",\"livenessProbe\":{\"failureThreshold\":3,\"httpGet\":{\"path\":\"/\",\"port\":9090,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":30,\"periodSeconds\":10,\"successThreshold\":1,\"timeoutSeconds\":30},\"name\":\"main\",\"ports\":[{\"containerPort\":9090,\"name\":\"http\",\"protocol\":\"TCP\"}],\"resources\":{\"limits\":{\"cpu\":\"100m\",\"memory\":\"500Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"50Mi\"}}}],\"imagePullSecrets\":[{\"name\":\"emptyacrsecret\"}],\"nodeSelector\":{\"beta.kubernetes.io/os\":\"linux\"},\"priorityClassName\":\"system-node-critical\",\"serviceAccountName\":\"kubernetes-dashboard\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}]}}}}\n"
+ },
+ "creationTimestamp": "2019-03-12T16:38:31Z",
+ "generation": 6,
+ "labels": {
+ "addonmanager.kubernetes.io/mode": "Reconcile",
+ "k8s-app": "kubernetes-dashboard",
+ "kubernetes.io/cluster-service": "true"
+ },
+ "name": "kubernetes-dashboard",
+ "namespace": "kube-system",
+ "resourceVersion": "15831521",
+ "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/kubernetes-dashboard",
+ "uid": "45b9cc8d-44e5-11e9-9920-423525a6b683"
+ },
+ "spec": {
+ "progressDeadlineSeconds": 2147483647,
+ "replicas": 1,
+ "revisionHistoryLimit": 10,
+ "selector": {
+ "matchLabels": {
+ "k8s-app": "kubernetes-dashboard",
+ "kubernetes.io/cluster-service": "true"
+ }
+ },
+ "strategy": {
+ "rollingUpdate": {
+ "maxSurge": 0,
+ "maxUnavailable": 1
+ },
+ "type": "RollingUpdate"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "k8s-app": "kubernetes-dashboard",
+ "kubernetes.io/cluster-service": "true"
+ }
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "image": "aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64:v1.10.1",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 3,
+ "httpGet": {
+ "path": "/",
+ "port": 9090,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 30,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 30
+ },
+ "name": "main",
+ "ports": [
+ {
+ "containerPort": 9090,
+ "name": "http",
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "cpu": "100m",
+ "memory": "500Mi"
+ },
+ "requests": {
+ "cpu": "100m",
+ "memory": "50Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File"
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kubernetes-dashboard",
+ "serviceAccountName": "kubernetes-dashboard",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ }
+ ]
+ }
+ }
+ },
+ "status": {
+ "availableReplicas": 1,
+ "conditions": [
+ {
+ "lastTransitionTime": "2019-03-12T16:38:32Z",
+ "lastUpdateTime": "2019-03-12T16:38:32Z",
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ }
+ ],
+ "observedGeneration": 6,
+ "readyReplicas": 1,
+ "replicas": 1,
+ "updatedReplicas": 1
+ }
+ },
+ {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "Deployment",
+ "metadata": {
+ "annotations": {
+ "deployment.kubernetes.io/revision": "5",
+ "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"k8s-app\":\"metrics-server\",\"kubernetes.io/cluster-service\":\"true\"},\"name\":\"metrics-server\",\"namespace\":\"kube-system\"},\"spec\":{\"selector\":{\"matchLabels\":{\"k8s-app\":\"metrics-server\"}},\"template\":{\"metadata\":{\"labels\":{\"k8s-app\":\"metrics-server\"},\"name\":\"metrics-server\"},\"spec\":{\"affinity\":{\"nodeAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":{\"nodeSelectorTerms\":[{\"labelSelector\":null,\"matchExpressions\":[{\"key\":\"kubernetes.azure.com/cluster\",\"operator\":\"Exists\"}]}]}}},\"containers\":[{\"command\":[\"/metrics-server\",\"--source=kubernetes.summary_api:''\"],\"image\":\"aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1\",\"imagePullPolicy\":\"IfNotPresent\",\"name\":\"metrics-server\"}],\"imagePullSecrets\":[{\"name\":\"emptyacrsecret\"}],\"nodeSelector\":{\"beta.kubernetes.io/os\":\"linux\"},\"priorityClassName\":\"system-node-critical\",\"serviceAccountName\":\"metrics-server\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}]}}}}\n"
+ },
+ "creationTimestamp": "2019-03-12T16:38:31Z",
+ "generation": 5,
+ "labels": {
+ "addonmanager.kubernetes.io/mode": "Reconcile",
+ "k8s-app": "metrics-server",
+ "kubernetes.io/cluster-service": "true"
+ },
+ "name": "metrics-server",
+ "namespace": "kube-system",
+ "resourceVersion": "15144043",
+ "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/metrics-server",
+ "uid": "45556857-44e5-11e9-9920-423525a6b683"
+ },
+ "spec": {
+ "progressDeadlineSeconds": 2147483647,
+ "replicas": 1,
+ "revisionHistoryLimit": 10,
+ "selector": {
+ "matchLabels": {
+ "k8s-app": "metrics-server"
+ }
+ },
+ "strategy": {
+ "rollingUpdate": {
+ "maxSurge": 1,
+ "maxUnavailable": 1
+ },
+ "type": "RollingUpdate"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "k8s-app": "metrics-server"
+ },
+ "name": "metrics-server"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "command": [
+ "/metrics-server",
+ "--source=kubernetes.summary_api:''"
+ ],
+ "image": "aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "metrics-server",
+ "resources": {},
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File"
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "metrics-server",
+ "serviceAccountName": "metrics-server",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ }
+ ]
+ }
+ }
+ },
+ "status": {
+ "availableReplicas": 1,
+ "conditions": [
+ {
+ "lastTransitionTime": "2019-03-12T16:38:31Z",
+ "lastUpdateTime": "2019-03-12T16:38:31Z",
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ }
+ ],
+ "observedGeneration": 5,
+ "readyReplicas": 1,
+ "replicas": 1,
+ "updatedReplicas": 1
+ }
+ },
+ {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "Deployment",
+ "metadata": {
+ "annotations": {
+ "deployment.kubernetes.io/revision": "7",
+ "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"name\":\"omsagent-rs\",\"namespace\":\"kube-system\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"rsName\":\"omsagent-rs\"}},\"strategy\":{\"type\":\"RollingUpdate\"},\"template\":{\"metadata\":{\"annotations\":{\"agentVersion\":\"1.10.0.1\",\"dockerProviderVersion\":\"6.0.0-0\",\"schema-versions\":\"v1\"},\"labels\":{\"rsName\":\"omsagent-rs\"}},\"spec\":{\"containers\":[{\"env\":[{\"name\":\"AKS_RESOURCE_ID\",\"value\":\"/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test\"},{\"name\":\"AKS_REGION\",\"value\":\"eastus\"},{\"name\":\"CONTROLLER_TYPE\",\"value\":\"ReplicaSet\"},{\"name\":\"NODE_IP\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"status.hostIP\"}}}],\"image\":\"mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019\",\"imagePullPolicy\":\"IfNotPresent\",\"livenessProbe\":{\"exec\":{\"command\":[\"/bin/bash\",\"-c\",\"/opt/livenessprobe.sh\"]},\"initialDelaySeconds\":60,\"periodSeconds\":60},\"name\":\"omsagent\",\"ports\":[{\"containerPort\":25225,\"protocol\":\"TCP\"},{\"containerPort\":25224,\"protocol\":\"UDP\"},{\"containerPort\":25227,\"name\":\"in-rs-tcp\",\"protocol\":\"TCP\"}],\"resources\":{\"limits\":{\"cpu\":\"150m\",\"memory\":\"500Mi\"},\"requests\":{\"cpu\":\"110m\",\"memory\":\"250Mi\"}},\"securityContext\":{\"privileged\":true},\"volumeMounts\":[{\"mountPath\":\"/var/run/host\",\"name\":\"docker-sock\"},{\"mountPath\":\"/var/log\",\"name\":\"host-log\"},{\"mountPath\":\"/var/lib/docker/containers\",\"name\":\"containerlog-path\"},{\"mountPath\":\"/etc/kubernetes/host\",\"name\":\"azure-json-path\"},{\"mountPath\":\"/etc/omsagent-secret\",\"name\":\"omsagent-secret\",\"readOnly\":true},{\"mountPath\":\"/etc/config\",\"name\":\"omsagent-rs-config\"},{\"mo
untPath\":\"/etc/config/settings\",\"name\":\"settings-vol-config\",\"readOnly\":true}]}],\"nodeSelector\":{\"beta.kubernetes.io/os\":\"linux\",\"kubernetes.io/role\":\"agent\"},\"serviceAccountName\":\"omsagent\",\"volumes\":[{\"hostPath\":{\"path\":\"/var/run\"},\"name\":\"docker-sock\"},{\"hostPath\":{\"path\":\"/etc/hostname\"},\"name\":\"container-hostname\"},{\"hostPath\":{\"path\":\"/var/log\"},\"name\":\"host-log\"},{\"hostPath\":{\"path\":\"/var/lib/docker/containers\"},\"name\":\"containerlog-path\"},{\"hostPath\":{\"path\":\"/etc/kubernetes\"},\"name\":\"azure-json-path\"},{\"name\":\"omsagent-secret\",\"secret\":{\"secretName\":\"omsagent-secret\"}},{\"configMap\":{\"name\":\"omsagent-rs-config\"},\"name\":\"omsagent-rs-config\"},{\"configMap\":{\"name\":\"container-azm-ms-agentconfig\",\"optional\":true},\"name\":\"settings-vol-config\"}]}}}}\n"
+ },
+ "creationTimestamp": "2019-08-19T22:44:22Z",
+ "generation": 7,
+ "labels": {
+ "rsName": "omsagent-rs"
+ },
+ "name": "omsagent-rs",
+ "namespace": "kube-system",
+ "resourceVersion": "19063500",
+ "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/omsagent-rs",
+ "uid": "e32d7e82-c2d2-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "progressDeadlineSeconds": 2147483647,
+ "replicas": 1,
+ "revisionHistoryLimit": 10,
+ "selector": {
+ "matchLabels": {
+ "rsName": "omsagent-rs"
+ }
+ },
+ "strategy": {
+ "rollingUpdate": {
+ "maxSurge": 1,
+ "maxUnavailable": 1
+ },
+ "type": "RollingUpdate"
+ },
+ "template": {
+ "metadata": {
+ "annotations": {
+ "agentVersion": "1.10.0.1",
+ "dockerProviderVersion": "6.0.0-0",
+ "schema-versions": "v1"
+ },
+ "creationTimestamp": null,
+ "labels": {
+ "rsName": "omsagent-rs"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "AKS_RESOURCE_ID",
+ "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test"
+ },
+ {
+ "name": "AKS_REGION",
+ "value": "eastus"
+ },
+ {
+ "name": "CONTROLLER_TYPE",
+ "value": "ReplicaSet"
+ },
+ {
+ "name": "NODE_IP",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "status.hostIP"
+ }
+ }
+ }
+ ],
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/livenessprobe.sh"
+ ]
+ },
+ "failureThreshold": 3,
+ "initialDelaySeconds": 60,
+ "periodSeconds": 60,
+ "successThreshold": 1,
+ "timeoutSeconds": 1
+ },
+ "name": "omsagent",
+ "ports": [
+ {
+ "containerPort": 25225,
+ "protocol": "TCP"
+ },
+ {
+ "containerPort": 25224,
+ "protocol": "UDP"
+ },
+ {
+ "containerPort": 25227,
+ "name": "in-rs-tcp",
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "cpu": "150m",
+ "memory": "500Mi"
+ },
+ "requests": {
+ "cpu": "110m",
+ "memory": "250Mi"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/host",
+ "name": "docker-sock"
+ },
+ {
+ "mountPath": "/var/log",
+ "name": "host-log"
+ },
+ {
+ "mountPath": "/var/lib/docker/containers",
+ "name": "containerlog-path"
+ },
+ {
+ "mountPath": "/etc/kubernetes/host",
+ "name": "azure-json-path"
+ },
+ {
+ "mountPath": "/etc/omsagent-secret",
+ "name": "omsagent-secret",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/config",
+ "name": "omsagent-rs-config"
+ },
+ {
+ "mountPath": "/etc/config/settings",
+ "name": "settings-vol-config",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux",
+ "kubernetes.io/role": "agent"
+ },
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "omsagent",
+ "serviceAccountName": "omsagent",
+ "terminationGracePeriodSeconds": 30,
+ "volumes": [
+ {
+ "hostPath": {
+ "path": "/var/run",
+ "type": ""
+ },
+ "name": "docker-sock"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/hostname",
+ "type": ""
+ },
+ "name": "container-hostname"
+ },
+ {
+ "hostPath": {
+ "path": "/var/log",
+ "type": ""
+ },
+ "name": "host-log"
+ },
+ {
+ "hostPath": {
+ "path": "/var/lib/docker/containers",
+ "type": ""
+ },
+ "name": "containerlog-path"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes",
+ "type": ""
+ },
+ "name": "azure-json-path"
+ },
+ {
+ "name": "omsagent-secret",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-secret"
+ }
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "omsagent-rs-config"
+ },
+ "name": "omsagent-rs-config"
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "container-azm-ms-agentconfig",
+ "optional": true
+ },
+ "name": "settings-vol-config"
+ }
+ ]
+ }
+ }
+ },
+ "status": {
+ "availableReplicas": 1,
+ "conditions": [
+ {
+ "lastTransitionTime": "2019-08-19T22:44:22Z",
+ "lastUpdateTime": "2019-08-19T22:44:22Z",
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ }
+ ],
+ "observedGeneration": 7,
+ "readyReplicas": 1,
+ "replicas": 1,
+ "updatedReplicas": 1
+ }
+ },
+ {
+ "apiVersion": "extensions/v1beta1",
+ "kind": "Deployment",
+ "metadata": {
+ "annotations": {
+ "deployment.kubernetes.io/revision": "9",
+ "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"component\":\"tunnel\",\"kubernetes.io/cluster-service\":\"true\",\"tier\":\"node\"},\"name\":\"tunnelfront\",\"namespace\":\"kube-system\"},\"spec\":{\"replicas\":1,\"selector\":{\"matchLabels\":{\"component\":\"tunnel\"}},\"template\":{\"metadata\":{\"labels\":{\"component\":\"tunnel\"}},\"spec\":{\"affinity\":{\"nodeAffinity\":{\"requiredDuringSchedulingIgnoredDuringExecution\":{\"nodeSelectorTerms\":[{\"labelSelector\":null,\"matchExpressions\":[{\"key\":\"kubernetes.azure.com/cluster\",\"operator\":\"Exists\"}]}]}}},\"containers\":[{\"env\":[{\"name\":\"OVERRIDE_TUNNEL_SERVER_NAME\",\"value\":\"t_dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io\"},{\"name\":\"TUNNEL_CLUSTERUSER_NAME\",\"value\":\"28957308\"},{\"name\":\"TUNNELGATEWAY_SERVER_NAME\",\"value\":\"dilipr-hea-dilipr-health-te-72c8e8-0b16acad.tun.eastus.azmk8s.io\"},{\"name\":\"TUNNELGATEWAY_SSH_PORT\",\"value\":\"22\"},{\"name\":\"TUNNELGATEWAY_TLS_PORT\",\"value\":\"443\"},{\"name\":\"KUBE_CONFIG\",\"value\":\"/etc/kubernetes/kubeconfig/kubeconfig\"}],\"image\":\"aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7\",\"imagePullPolicy\":\"IfNotPresent\",\"livenessProbe\":{\"exec\":{\"command\":[\"/lib/tunnel-front/check-tunnel-connection.sh\"]},\"failureThreshold\":12,\"initialDelaySeconds\":10,\"periodSeconds\":60},\"name\":\"tunnel-front\",\"resources\":{\"requests\":{\"cpu\":\"10m\",\"memory\":\"64Mi\"}},\"securityContext\":{\"privileged\":true},\"volumeMounts\":[{\"mountPath\":\"/etc/kubernetes/kubeconfig\",\"name\":\"kubeconfig\",\"readOnly\":true},{\"mountPath\":\"/etc/kubernetes/certs\",\"name\":\"certificates\",\"readOnly\":true}]}],\"dnsPolicy\":\"Default\",\"imagePullSecrets\":[{\"name\":\"emptyacrsecret\"}],\"nodeSelector\":{\"beta.kubernetes.io/os\":\"linux\
"},\"priorityClassName\":\"system-node-critical\",\"serviceAccountName\":\"tunnelfront\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}],\"volumes\":[{\"configMap\":{\"name\":\"tunnelfront-kubecfg\",\"optional\":true},\"name\":\"kubeconfig\"},{\"hostPath\":{\"path\":\"/etc/kubernetes/certs\"},\"name\":\"certificates\"}]}}}}\n"
+ },
+ "creationTimestamp": "2019-03-12T16:38:32Z",
+ "generation": 9,
+ "labels": {
+ "addonmanager.kubernetes.io/mode": "Reconcile",
+ "component": "tunnel",
+ "kubernetes.io/cluster-service": "true",
+ "tier": "node"
+ },
+ "name": "tunnelfront",
+ "namespace": "kube-system",
+ "resourceVersion": "17628811",
+ "selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/tunnelfront",
+ "uid": "45e524e6-44e5-11e9-9920-423525a6b683"
+ },
+ "spec": {
+ "progressDeadlineSeconds": 2147483647,
+ "replicas": 1,
+ "revisionHistoryLimit": 10,
+ "selector": {
+ "matchLabels": {
+ "component": "tunnel"
+ }
+ },
+ "strategy": {
+ "rollingUpdate": {
+ "maxSurge": 1,
+ "maxUnavailable": 1
+ },
+ "type": "RollingUpdate"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "component": "tunnel"
+ }
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "OVERRIDE_TUNNEL_SERVER_NAME",
+ "value": "t_dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "TUNNEL_CLUSTERUSER_NAME",
+ "value": "28957308"
+ },
+ {
+ "name": "TUNNELGATEWAY_SERVER_NAME",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-0b16acad.tun.eastus.azmk8s.io"
+ },
+ {
+ "name": "TUNNELGATEWAY_SSH_PORT",
+ "value": "22"
+ },
+ {
+ "name": "TUNNELGATEWAY_TLS_PORT",
+ "value": "443"
+ },
+ {
+ "name": "KUBE_CONFIG",
+ "value": "/etc/kubernetes/kubeconfig/kubeconfig"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/lib/tunnel-front/check-tunnel-connection.sh"
+ ]
+ },
+ "failureThreshold": 12,
+ "initialDelaySeconds": 10,
+ "periodSeconds": 60,
+ "successThreshold": 1,
+ "timeoutSeconds": 1
+ },
+ "name": "tunnel-front",
+ "resources": {
+ "requests": {
+ "cpu": "10m",
+ "memory": "64Mi"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/etc/kubernetes/kubeconfig",
+ "name": "kubeconfig",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/kubernetes/certs",
+ "name": "certificates",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "Default",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "tunnelfront",
+ "serviceAccountName": "tunnelfront",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "tunnelfront-kubecfg",
+ "optional": true
+ },
+ "name": "kubeconfig"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes/certs",
+ "type": ""
+ },
+ "name": "certificates"
+ }
+ ]
+ }
+ }
+ },
+ "status": {
+ "availableReplicas": 1,
+ "conditions": [
+ {
+ "lastTransitionTime": "2019-03-12T16:38:32Z",
+ "lastUpdateTime": "2019-03-12T16:38:32Z",
+ "message": "Deployment has minimum availability.",
+ "reason": "MinimumReplicasAvailable",
+ "status": "True",
+ "type": "Available"
+ }
+ ],
+ "observedGeneration": 9,
+ "readyReplicas": 1,
+ "replicas": 1,
+ "updatedReplicas": 1
+ }
+ }
+ ],
+ "kind": "List",
+ "metadata": {
+ "resourceVersion": "",
+ "selfLink": ""
+ }
+}
diff --git a/test/code/plugin/health/health_container_cpu_memory_aggregator_spec.rb b/test/code/plugin/health/health_container_cpu_memory_aggregator_spec.rb
new file mode 100644
index 000000000..074878fe2
--- /dev/null
+++ b/test/code/plugin/health/health_container_cpu_memory_aggregator_spec.rb
@@ -0,0 +1,190 @@
+require_relative '../test_helpers'
+Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/code/plugin/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file }
+include HealthModel
+
+describe 'HealthContainerCpuMemoryAggregator spec' do
+
+ it 'dedupes and drops older records' do
+ formatted_records = JSON.parse'[{
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "CounterName": "memoryRssBytes",
+ "CounterValue": 14061568,
+ "Timestamp": "2019-08-23T23:13:39Z"
+ },
+ {
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/952488f3-a1f2-11e9-8b08-d602e29755d5/sidecar",
+ "CounterName": "memoryRssBytes",
+ "CounterValue": 14061568,
+ "Timestamp": "2019-08-23T22:13:39Z"
+ }]'
+
+ resources = HealthKubernetesResources.instance
+ nodes = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'nodes.json')))
+ pods = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'pods.json')))
+ deployments = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'deployments.json')))
+
+ resources.pod_inventory = pods
+ resources.node_inventory = nodes
+ resources.set_deployment_inventory(deployments)
+ resources.build_pod_uid_lookup #call this in in_kube_health every min
+
+ cluster_labels = {
+ 'container.azm.ms/cluster-region' => 'eastus',
+ 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a',
+ 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test',
+ 'container.azm.ms/cluster-name' => 'dilipr-health-test'
+ }
+ cluster_id = 'fake_cluster_id'
+ provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json"))
+ aggregator = HealthContainerCpuMemoryAggregator.new(resources, provider)
+ deduped_records = aggregator.dedupe_records(formatted_records)
+ deduped_records.size.must_equal 1
+ deduped_records[0]["Timestamp"].must_equal "2019-08-23T23:13:39Z"
+ end
+
+ it 'aggregates based on container name' do
+ file = File.read(File.join(File.expand_path(File.dirname(__FILE__)),'cadvisor_perf.json'))
+ records = JSON.parse(file)
+ records = records.select{|record| record['DataItems'][0]['ObjectName'] == 'K8SContainer'}
+ formatted_records = []
+ formatter = HealthContainerCpuMemoryRecordFormatter.new
+ records.each{|record|
+ formatted_record = formatter.get_record_from_cadvisor_record(record)
+ formatted_records.push(formatted_record)
+ }
+
+ resources = HealthKubernetesResources.instance
+ nodes = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'nodes.json')))
+ pods = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'pods.json')))
+ deployments = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'deployments.json')))
+
+ resources.pod_inventory = pods
+ resources.node_inventory = nodes
+ resources.set_deployment_inventory(deployments)
+ resources.build_pod_uid_lookup #call this in in_kube_health every min
+
+ cluster_labels = {
+ 'container.azm.ms/cluster-region' => 'eastus',
+ 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a',
+ 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test',
+ 'container.azm.ms/cluster-name' => 'dilipr-health-test'
+ }
+
+ cluster_id = 'fake_cluster_id'
+
+ provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json"))
+
+ aggregator = HealthContainerCpuMemoryAggregator.new(resources, provider)
+ deduped_records = aggregator.dedupe_records(formatted_records)
+ aggregator.aggregate(deduped_records)
+ aggregator.compute_state
+ records = aggregator.get_records
+ records.size.must_equal 30
+ #records have all the required details
+ records.each{|record|
+ record["Details"]["details"]["container"].wont_be_nil
+ record["Details"]["details"]["workload_name"].wont_be_nil
+ record["Details"]["details"]["workload_kind"].wont_be_nil
+ record["Details"]["details"]["namespace"].wont_be_nil
+ record["Details"]["timestamp"].wont_be_nil
+ record["Details"]["state"].wont_be_nil
+ record["MonitorTypeId"].wont_be_nil
+ record["MonitorInstanceId"].wont_be_nil
+ record["TimeFirstObserved"].wont_be_nil
+ record["TimeGenerated"].wont_be_nil
+ }
+ end
+
+ it "calculates the state correctly" do
+ file = File.read(File.join(File.expand_path(File.dirname(__FILE__)),'cadvisor_perf.json'))
+ records = JSON.parse(file)
+ records = records.select{|record| record['DataItems'][0]['ObjectName'] == 'K8SContainer'}
+ formatted_records = []
+ formatter = HealthContainerCpuMemoryRecordFormatter.new
+ records.each{|record|
+ formatted_record = formatter.get_record_from_cadvisor_record(record)
+ formatted_records.push(formatted_record)
+ }
+
+ resources = HealthKubernetesResources.instance
+ nodes = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'nodes.json')))
+ pods = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'pods.json')))
+ deployments = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'deployments.json')))
+
+ resources.pod_inventory = pods
+ resources.node_inventory = nodes
+ resources.set_deployment_inventory(deployments)
+ resources.build_pod_uid_lookup #call this in in_kube_health every min
+
+ cluster_labels = {
+ 'container.azm.ms/cluster-region' => 'eastus',
+ 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a',
+ 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test',
+ 'container.azm.ms/cluster-name' => 'dilipr-health-test'
+ }
+
+ cluster_id = 'fake_cluster_id'
+
+ provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json"))
+
+ aggregator = HealthContainerCpuMemoryAggregator.new(resources, provider)
+ deduped_records = aggregator.dedupe_records(formatted_records)
+ aggregator.aggregate(deduped_records)
+ aggregator.compute_state
+ records = aggregator.get_records
+
+ #omsagent has limit set. So its state should be set to pass.
+ #sidecar has no limit set; the test still expects its state to be pass (see assertion below)
+ omsagent_record = records.select{|r| r["MonitorTypeId"] == MonitorId::CONTAINER_CPU_MONITOR_ID && r["Details"]["details"]["container"] == "omsagent"}[0]
+ sidecar_record = records.select{|r| r["MonitorTypeId"] == MonitorId::CONTAINER_CPU_MONITOR_ID && r["Details"]["details"]["container"] == "sidecar"}[0]
+ omsagent_record['Details']['state'].must_equal HealthMonitorStates::PASS #limit is set
+ sidecar_record['Details']['state'].must_equal HealthMonitorStates::PASS
+ end
+
+
+ it "calculates the state as unknown when signals are missing" do
+ file = File.read(File.join(File.expand_path(File.dirname(__FILE__)),'cadvisor_perf.json'))
+ records = JSON.parse(file)
+ records = records.select{|record| record['DataItems'][0]['ObjectName'] == 'K8SContainer'}
+ formatted_records = []
+ formatter = HealthContainerCpuMemoryRecordFormatter.new
+ records.each{|record|
+ formatted_record = formatter.get_record_from_cadvisor_record(record)
+ formatted_records.push(formatted_record)
+ }
+
+ formatted_records = formatted_records.reject{|r| r["InstanceName"] == "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/69e68b21-c5df-11e9-8736-86290fd7dd1f/omsagent" && r["CounterName"] == "cpuUsageNanoCores"}
+ formatted_records = formatted_records.reject{|r| r["InstanceName"] == "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/b1e04e1c-c5df-11e9-8736-86290fd7dd1f/omsagent" && r["CounterName"] == "cpuUsageNanoCores"}
+
+ resources = HealthKubernetesResources.instance
+ nodes = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'nodes.json')))
+ pods = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'pods.json')))
+ deployments = JSON.parse(File.read(File.join(File.expand_path(File.dirname(__FILE__)),'deployments.json')))
+
+ resources.pod_inventory = pods
+ resources.node_inventory = nodes
+ resources.set_deployment_inventory(deployments)
+ resources.build_pod_uid_lookup #call this in in_kube_health every min
+
+ cluster_labels = {
+ 'container.azm.ms/cluster-region' => 'eastus',
+ 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a',
+ 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test',
+ 'container.azm.ms/cluster-name' => 'dilipr-health-test'
+ }
+
+ cluster_id = 'fake_cluster_id'
+
+ provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json"))
+
+ aggregator = HealthContainerCpuMemoryAggregator.new(resources, provider)
+ deduped_records = aggregator.dedupe_records(formatted_records)
+ aggregator.aggregate(deduped_records)
+ aggregator.compute_state
+ records = aggregator.get_records
+
+ #removed (i.e. missing) omsagent records should result in state being unknown
+ omsagent_record = records.select{|r| r["MonitorTypeId"] == MonitorId::CONTAINER_CPU_MONITOR_ID && r["Details"]["details"]["container"] == "omsagent" && !r["Details"]["details"]["workload_name"].include?("omsagent-rs") }[0]
+ omsagent_record['Details']['state'].must_equal HealthMonitorStates::UNKNOWN #missing cpu signals => state unknown
+ end
+end
\ No newline at end of file
diff --git a/test/code/plugin/health/health_container_cpu_memory_record_formatter_spec.rb b/test/code/plugin/health/health_container_cpu_memory_record_formatter_spec.rb
new file mode 100644
index 000000000..d01922bce
--- /dev/null
+++ b/test/code/plugin/health/health_container_cpu_memory_record_formatter_spec.rb
@@ -0,0 +1,58 @@
+require_relative '../test_helpers'
+Dir[File.join(File.expand_path(File.dirname(__FILE__)), "../../../../source/code/plugin/health/*.rb")].reject{|f| f.include?('health_monitor_utils')}.each { |file| require file }
+include HealthModel
+include Minitest
+
+describe "HealthContainerCpuMemoryRecordFormatter spec" do
+ it 'returns the record in expected format when cadvisor record is well formed' do
+ formatter = HealthContainerCpuMemoryRecordFormatter.new
+ cadvisor_record = JSON.parse('{
+ "DataItems": [
+ {
+ "Timestamp": "2019-08-01T23:19:19Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/6708e4ac-b49a-11e9-8a49-52a94e80d897/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 85143552
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ }')
+ record = formatter.get_record_from_cadvisor_record(cadvisor_record)
+ record.wont_equal nil
+ record["InstanceName"].must_equal "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/6708e4ac-b49a-11e9-8a49-52a94e80d897/omsagent"
+ record["CounterName"].must_equal "memoryWorkingSetBytes"
+ record["CounterValue"].must_equal 85143552
+ record["Timestamp"].must_equal "2019-08-01T23:19:19Z"
+ end
+
+ it 'returns nil for invalid cadvisor record' do
+ formatter = HealthContainerCpuMemoryRecordFormatter.new
+ cadvisor_record = JSON.parse('{
+ "DataItms": [
+ {
+ "Timestamp": "2019-08-01T23:19:19Z",
+ "Host": "aks-nodepool1-19574989-2",
+ "ObjectName": "K8SContainer",
+ "InstanceName": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test/6708e4ac-b49a-11e9-8a49-52a94e80d897/omsagent",
+ "Collections": [
+ {
+ "CounterName": "memoryWorkingSetBytes",
+ "Value": 85143552
+ }
+ ]
+ }
+ ],
+ "DataType": "LINUX_PERF_BLOB",
+ "IPName": "LogManagement"
+ }')
+ record = formatter.get_record_from_cadvisor_record(cadvisor_record)
+ record.must_be_nil
+ end
+end
\ No newline at end of file
diff --git a/test/code/plugin/health/health_kubernetes_resource_spec.rb b/test/code/plugin/health/health_kubernetes_resource_spec.rb
index c27d969ec..dbeec4858 100644
--- a/test/code/plugin/health/health_kubernetes_resource_spec.rb
+++ b/test/code/plugin/health/health_kubernetes_resource_spec.rb
@@ -207,7 +207,7 @@
resources = HealthKubernetesResources.instance
resources.node_inventory = nodes
resources.pod_inventory = pods
- resources.deployment_inventory = deployments
+ resources.set_deployment_inventory(deployments)
#act
parsed_nodes = resources.get_nodes
parsed_workloads = resources.get_workload_names
@@ -217,6 +217,28 @@
assert_equal parsed_workloads.size, 3
assert_equal parsed_nodes, ['aks-nodepool1-19574989-0', 'aks-nodepool1-19574989-1']
- assert_equal parsed_workloads, ['default~~diliprdeploymentnodeapps', 'default~~rss-site', 'kube-system~~kube-proxy']
+ parsed_workloads.sort.must_equal ['default~~diliprdeploymentnodeapps', 'default~~rss-site', 'kube-system~~kube-proxy'].sort
end
+
+ # it 'builds the pod_uid lookup correctly' do
+ # #arrange
+ # f = File.read('C:/Users/dilipr/desktop/health/container_cpu_memory/nodes.json')
+ # nodes = JSON.parse(f)
+ # f = File.read('C:/Users/dilipr/desktop/health/container_cpu_memory/pods.json')
+ # pods = JSON.parse(f)
+ # f = File.read('C:/Users/dilipr/desktop/health/container_cpu_memory/deployments.json')
+ # deployments = JSON.parse(f)
+
+ # resources = HealthKubernetesResources.instance
+
+ # resources.node_inventory = nodes
+ # resources.pod_inventory = pods
+ # resources.set_deployment_inventory(deployments) #resets deployment_lookup -- this was causing Unit test failures
+
+ # resources.build_pod_uid_lookup
+
+ # resources.pod_uid_lookup
+ # resources.workload_container_count
+
+ # end
end
\ No newline at end of file
diff --git a/test/code/plugin/health/health_model_builder_test.rb b/test/code/plugin/health/health_model_builder_test.rb
index df921049c..a7c5e0927 100644
--- a/test/code/plugin/health/health_model_builder_test.rb
+++ b/test/code/plugin/health/health_model_builder_test.rb
@@ -64,10 +64,10 @@ def test_event_stream
resources = HealthKubernetesResources.instance
resources.node_inventory = node_inventory
resources.pod_inventory = pod_inventory
- resources.deployment_inventory = deployment_inventory
+ resources.set_deployment_inventory(deployment_inventory)
workload_names = resources.get_workload_names
- provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../..//installer/conf/healthmonitorconfig.json"))
+ provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json"))
health_monitor_records = []
records.each do |record|
@@ -334,4 +334,162 @@ def test_event_stream_aks_engine
after_state.initialize_state(deserialized_state)
end
end
+
+ def test_container_memory_cpu_with_model
+ health_definition_path = File.join(__dir__, '../../../../installer/conf/health_model_definition.json')
+ health_model_definition = ParentMonitorProvider.new(HealthModelDefinitionParser.new(health_definition_path).parse_file)
+ monitor_factory = MonitorFactory.new
+ hierarchy_builder = HealthHierarchyBuilder.new(health_model_definition, monitor_factory)
+ # TODO: Figure out if we need to add NodeMonitorHierarchyReducer to the list of finalizers. For now, dont compress/optimize, since it becomes impossible to construct the model on the UX side
+ state_finalizers = [AggregateMonitorStateFinalizer.new]
+ monitor_set = MonitorSet.new
+ model_builder = HealthModelBuilder.new(hierarchy_builder, state_finalizers, monitor_set)
+
+ nodes_file_map = {
+ "first" => "C:/Users/dilipr/desktop/health/container_cpu_memory/nodes.json",
+ "second" => "C:/Users/dilipr/desktop/health/container_cpu_memory/nodes.json",
+ "third" => "C:/Users/dilipr/desktop/health/container_cpu_memory/nodes.json",
+ }
+
+ pods_file_map = {
+ "first" => "C:/Users/dilipr/desktop/health/container_cpu_memory/pods.json",
+ "second" => "C:/Users/dilipr/desktop/health/container_cpu_memory/pods.json",
+ "third" => "C:/Users/dilipr/desktop/health/container_cpu_memory/pods.json",
+ }
+
+ cluster_labels = {
+ 'container.azm.ms/cluster-region' => 'eastus',
+ 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a',
+ 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test',
+ 'container.azm.ms/cluster-name' => 'dilipr-health-test'
+ }
+
+ cluster_id = 'fake_cluster_id'
+
+ #test
+ state = HealthMonitorState.new()
+ generator = HealthMissingSignalGenerator.new
+
+ mock_data_path = "C:/Users/dilipr/desktop/health/container_cpu_memory/daemonset.json"
+ file = File.read(mock_data_path)
+ records = JSON.parse(file)
+
+ node_inventory = JSON.parse(File.read("C:/Users/dilipr/desktop/health/container_cpu_memory/nodes.json"))
+ pod_inventory = JSON.parse(File.read("C:/Users/dilipr/desktop/health/container_cpu_memory/pods.json"))
+ deployment_inventory = JSON.parse(File.read("C:/Users/dilipr/desktop/health/container_cpu_memory/deployments.json"))
+ resources = HealthKubernetesResources.instance
+ resources.node_inventory = node_inventory
+ resources.pod_inventory = pod_inventory
+ resources.set_deployment_inventory(deployment_inventory)
+
+ workload_names = resources.get_workload_names
+ provider = HealthMonitorProvider.new(cluster_id, cluster_labels, resources, File.join(__dir__, "../../../../installer/conf/healthmonitorconfig.json"))
+
+
+ #container memory cpu records
+ file = File.read('C:/Users/dilipr/desktop/health/container_cpu_memory/cadvisor_perf.json')
+ cadvisor_records = JSON.parse(file)
+ cadvisor_records = cadvisor_records.select{|record| record['DataItems'][0]['ObjectName'] == 'K8SContainer'}
+ formatted_records = []
+ formatter = HealthContainerCpuMemoryRecordFormatter.new
+ cadvisor_records.each{|record|
+ formatted_record = formatter.get_record_from_cadvisor_record(record)
+ formatted_records.push(formatted_record)
+ }
+
+ resources.build_pod_uid_lookup #call this in in_kube_health every min
+
+ cluster_labels = {
+ 'container.azm.ms/cluster-region' => 'eastus',
+ 'container.azm.ms/cluster-subscription-id' => '72c8e8ca-dc16-47dc-b65c-6b5875eb600a',
+ 'container.azm.ms/cluster-resource-group' => 'dilipr-health-test',
+ 'container.azm.ms/cluster-name' => 'dilipr-health-test'
+ }
+
+ cluster_id = 'fake_cluster_id'
+
+ aggregator = HealthContainerCpuMemoryAggregator.new(resources, provider)
+ deduped_records = aggregator.dedupe_records(formatted_records)
+ aggregator.aggregate(deduped_records)
+ aggregator.compute_state
+ container_cpu_memory_records = aggregator.get_records
+
+ records.concat(container_cpu_memory_records)
+
+ health_monitor_records = []
+ records.each do |record|
+ monitor_instance_id = record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID]
+ monitor_id = record[HealthMonitorRecordFields::MONITOR_ID]
+ health_monitor_record = HealthMonitorRecord.new(
+ record[HealthMonitorRecordFields::MONITOR_ID],
+ record[HealthMonitorRecordFields::MONITOR_INSTANCE_ID],
+ record[HealthMonitorRecordFields::TIME_FIRST_OBSERVED],
+ record[HealthMonitorRecordFields::DETAILS]["state"],
+ provider.get_labels(record),
+ provider.get_config(monitor_id),
+ record[HealthMonitorRecordFields::DETAILS]
+ )
+
+ state.update_state(health_monitor_record,
+ provider.get_config(health_monitor_record.monitor_id)
+ )
+
+ # get the health state based on the monitor's operational state
+ # update state calls updates the state of the monitor based on configuration and history of the the monitor records
+ health_monitor_record.state = state.get_state(monitor_instance_id).new_state
+ health_monitor_records.push(health_monitor_record)
+ #puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}"
+ end
+
+ #handle kube api down
+ kube_api_down_handler = HealthKubeApiDownHandler.new
+ health_monitor_records = kube_api_down_handler.handle_kube_api_down(health_monitor_records)
+
+ # Dedupe daemonset signals
+ # Remove unit monitor signals for "gone" objects
+ reducer = HealthSignalReducer.new()
+ reduced_records = reducer.reduce_signals(health_monitor_records, resources)
+
+ cluster_id = 'fake_cluster_id'
+
+ #get the list of 'none' and 'unknown' signals
+ missing_signals = generator.get_missing_signals(cluster_id, reduced_records, resources, provider)
+ #update state for missing signals
+ missing_signals.each{|signal|
+ state.update_state(signal,
+ provider.get_config(signal.monitor_id)
+ )
+ }
+ generator.update_last_received_records(reduced_records)
+ reduced_records.push(*missing_signals)
+
+ # build the health model
+ all_records = reduced_records
+ model_builder.process_records(all_records)
+ all_monitors = model_builder.finalize_model
+
+ # update the state for aggregate monitors (unit monitors are updated above)
+ all_monitors.each{|monitor_instance_id, monitor|
+ if monitor.is_aggregate_monitor
+ state.update_state(monitor,
+ provider.get_config(monitor.monitor_id)
+ )
+ end
+
+ instance_state = state.get_state(monitor_instance_id)
+ #puts "#{monitor_instance_id} #{instance_state.new_state} #{instance_state.old_state} #{instance_state.should_send}"
+ should_send = instance_state.should_send
+
+ # always send cluster monitor as a heartbeat
+ if !should_send && monitor_instance_id != MonitorId::CLUSTER
+ all_monitors.delete(monitor_instance_id)
+ end
+ }
+
+ records_to_send = []
+ all_monitors.keys.each{|key|
+ record = provider.get_record(all_monitors[key], state)
+ #puts "#{record["MonitorInstanceId"]} #{record["OldState"]} #{record["NewState"]}"
+ }
+ end
end
\ No newline at end of file
diff --git a/test/code/plugin/health/nodes.json b/test/code/plugin/health/nodes.json
new file mode 100644
index 000000000..f1721e076
--- /dev/null
+++ b/test/code/plugin/health/nodes.json
@@ -0,0 +1,1966 @@
+{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "Node",
+ "metadata": {
+ "annotations": {
+ "node.alpha.kubernetes.io/ttl": "0",
+ "volumes.kubernetes.io/controller-managed-attach-detach": "true"
+ },
+ "creationTimestamp": "2019-03-12T16:40:36Z",
+ "labels": {
+ "agentpool": "nodepool1",
+ "beta.kubernetes.io/arch": "amd64",
+ "beta.kubernetes.io/instance-type": "Standard_DS1_v2",
+ "beta.kubernetes.io/os": "linux",
+ "failure-domain.beta.kubernetes.io/region": "eastus",
+ "failure-domain.beta.kubernetes.io/zone": "0",
+ "kubernetes.azure.com/cluster": "MC_dilipr-health-test_dilipr-health-test_eastus",
+ "kubernetes.io/hostname": "aks-nodepool1-19574989-0",
+ "kubernetes.io/role": "agent",
+ "node-role.kubernetes.io/agent": "",
+ "storageprofile": "managed",
+ "storagetier": "Premium_LRS"
+ },
+ "name": "aks-nodepool1-19574989-0",
+ "resourceVersion": "19068106",
+ "selfLink": "/api/v1/nodes/aks-nodepool1-19574989-0",
+ "uid": "9012b16c-44e5-11e9-9920-423525a6b683"
+ },
+ "spec": {
+ "podCIDR": "10.244.1.0/24",
+ "providerID": "azure:///subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/MC_dilipr-health-test_dilipr-health-test_eastus/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-19574989-0"
+ },
+ "status": {
+ "addresses": [
+ {
+ "address": "aks-nodepool1-19574989-0",
+ "type": "Hostname"
+ },
+ {
+ "address": "10.240.0.4",
+ "type": "InternalIP"
+ }
+ ],
+ "allocatable": {
+ "cpu": "940m",
+ "ephemeral-storage": "28043041951",
+ "hugepages-1Gi": "0",
+ "hugepages-2Mi": "0",
+ "memory": "2504708Ki",
+ "pods": "110"
+ },
+ "capacity": {
+ "cpu": "1",
+ "ephemeral-storage": "30428648Ki",
+ "hugepages-1Gi": "0",
+ "hugepages-2Mi": "0",
+ "memory": "3524612Ki",
+ "pods": "110"
+ },
+ "conditions": [
+ {
+ "lastHeartbeatTime": "2019-03-12T16:42:18Z",
+ "lastTransitionTime": "2019-03-12T16:42:18Z",
+ "message": "RouteController created a route",
+ "reason": "RouteCreated",
+ "status": "False",
+ "type": "NetworkUnavailable"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:22Z",
+ "lastTransitionTime": "2019-07-29T08:16:01Z",
+ "message": "kubelet has sufficient disk space available",
+ "reason": "KubeletHasSufficientDisk",
+ "status": "False",
+ "type": "OutOfDisk"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:22Z",
+ "lastTransitionTime": "2019-07-29T08:16:01Z",
+ "message": "kubelet has sufficient memory available",
+ "reason": "KubeletHasSufficientMemory",
+ "status": "False",
+ "type": "MemoryPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:22Z",
+ "lastTransitionTime": "2019-07-29T08:16:01Z",
+ "message": "kubelet has no disk pressure",
+ "reason": "KubeletHasNoDiskPressure",
+ "status": "False",
+ "type": "DiskPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:22Z",
+ "lastTransitionTime": "2019-03-12T16:40:36Z",
+ "message": "kubelet has sufficient PID available",
+ "reason": "KubeletHasSufficientPID",
+ "status": "False",
+ "type": "PIDPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:22Z",
+ "lastTransitionTime": "2019-07-29T08:16:01Z",
+ "message": "kubelet is posting ready status. AppArmor enabled",
+ "reason": "KubeletReady",
+ "status": "True",
+ "type": "Ready"
+ }
+ ],
+ "daemonEndpoints": {
+ "kubeletEndpoint": {
+ "Port": 10250
+ }
+ },
+ "images": [
+ {
+ "names": [
+ "nickchase/rss-php-nginx@sha256:48da56a77fe4ecff4917121365d8e0ce615ebbdfe31f48a996255f5592894e2b",
+ "nickchase/rss-php-nginx:v1"
+ ],
+ "sizeBytes": 677038498
+ },
+ {
+ "names": [
+ "rdilip83/logeverysecond@sha256:6fe5624808609c507178a77f94384fb9794a4d6b7d102ed8016a4baf608164a1",
+ "rdilip83/logeverysecond:v2"
+ ],
+ "sizeBytes": 674931590
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "k8s.gcr.io/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "k8s.gcr.io/hyperkube-amd64:v1.11.8"
+ ],
+ "sizeBytes": 615263658
+ },
+ {
+ "names": [
+ "microsoft/oms@sha256:de83d1df24cb86a3a3110bd03abbd5704d7a7345565b1996f49ff001a3665385",
+ "microsoft/oms:healthpreview04262019"
+ ],
+ "sizeBytes": 514907213
+ },
+ {
+ "names": [
+ "rdilip83/fixrubyerror@sha256:6b7f36cf6258b311015493ab025f06577d758c45bc5010d022ac160b9f40ea5d",
+ "rdilip83/fixrubyerror:latest"
+ ],
+ "sizeBytes": 494068028
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019"
+ ],
+ "sizeBytes": 494067935
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:fb2b90ce9bf7186fd9dfae97f5f72f9b9c80c8a0493af3cff74179cd4ff847c0",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08212019"
+ ],
+ "sizeBytes": 494067572
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c646e180483d295ffac114fb9df513db02553af7879681814d5910764653dd2d",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08202019"
+ ],
+ "sizeBytes": 494067210
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c21b596a22a1338ed293d01681f327acc871ee502ed779ec1109d6a93375bb3b",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08192019"
+ ],
+ "sizeBytes": 494055088
+ },
+ {
+ "names": [
+ "rdilip83/cifeatureprod08192019@sha256:7815bba9a805e4e8df33356fd532671de45525ce9c6e936e14f9b126e2097ecd",
+ "rdilip83/cifeatureprod08192019:v1"
+ ],
+ "sizeBytes": 494055088
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:59e34aab9f6e16a87e880b1ee1c9dd5434ee40dd29502e74aceefabf51443717",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:internaltesthealth08192019"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:6387d0dedf4de0bab430f681ef61361f63a20e1c4c287a9b60ea5460283ac6cf",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ci_feature_prod_health08192019"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "rdilip83/hc08192019@sha256:014d936771508d499ac4c15043e23b16bce8de0019fb2048b99540cbe9084895",
+ "rdilip83/hc08192019:1"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "rdilip83/health-rc@sha256:8ad12bce5ffd27b301bc6fe4355c8affa6fce080ae7e2291dec3a0ed11bb9483",
+ "rdilip83/health-rc:3"
+ ],
+ "sizeBytes": 494052863
+ },
+ {
+ "names": [
+ "rdilip83/health_ci_feature_image@sha256:1a574d25884483083e8cbaacbf0cb7c4e442dc736d480615c65f5c71f8969b13",
+ "rdilip83/health_ci_feature_image:v1"
+ ],
+ "sizeBytes": 494052147
+ },
+ {
+ "names": [
+ "rdilip83/healthrc@sha256:816c8cef09822daf050a0fca6f92e7ac19147ff4bf1a823d43fe70f73470cc0c",
+ "rdilip83/healthrc:v3"
+ ],
+ "sizeBytes": 494052138
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:d35aac044d1adc3d02269fde78f8dfd923db94b81288447cf6fdd482970a333b",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthmerge08142019"
+ ],
+ "sizeBytes": 494052135
+ },
+ {
+ "names": [
+ "rdilip83/healthrc@sha256:a130780e56ac0edb3ca29477e12edd5e9b5d08b5732dbd59ede9beb58e21eca7",
+ "rdilip83/healthrc:v2"
+ ],
+ "sizeBytes": 494051682
+ },
+ {
+ "names": [
+ "rdilip83/healthmerge@sha256:24d270b0f59fb484c283922474736c3cba50f8aad0270bc0a3acd14284694eea",
+ "rdilip83/healthmerge:v8"
+ ],
+ "sizeBytes": 494010139
+ },
+ {
+ "names": [
+ "rdilip83/health-rc@sha256:b1d24728eb808d301da426b76b7f7b79606204c4c2b695a24ac670be8276d55d",
+ "rdilip83/health-rc:1"
+ ],
+ "sizeBytes": 494000891
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:a0666957cccbfdf5784accd1133408bf017c28a6e694d9a2ae74da94eef2d285",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview08012019"
+ ],
+ "sizeBytes": 493994261
+ },
+ {
+ "names": [
+ "rdilip83/mergehealth@sha256:32c9b35a6809c54d5296e2ca2b122b35a4ad8c852622174cc5a9f92cc27e56e4",
+ "rdilip83/mergehealth:v3"
+ ],
+ "sizeBytes": 493988815
+ },
+ {
+ "names": [
+ "rdilip83/mergehealth@sha256:a3521e8f36e007b3cb949e0356a75394ac61fd2024ca1ec4827b8d54fb068534",
+ "rdilip83/mergehealth:v1"
+ ],
+ "sizeBytes": 493981585
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0438e4690e042b195917e160b8949aeb339520ee19c898a8bb9452f36d1f84f1",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview07182019"
+ ],
+ "sizeBytes": 493977357
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:9ebc410a36856176921dba81b5bd43132469209b315f52be346690435419b9bb"
+ ],
+ "sizeBytes": 493946790
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:4e51195a9c77bd166fc90ee5f6143a4604b502ab7ef0f06431dec10c341b10f3",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview06272019"
+ ],
+ "sizeBytes": 493893635
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06272019@sha256:d888ba5ff5e5810113a32f9c9812a5e28088cc81b902e95a185fe465a514029c",
+ "rdilip83/healthpreview06272019:latest"
+ ],
+ "sizeBytes": 493893633
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06252019-1@sha256:1561876cffe94433a569f29f5231548e039193ebaa7ec640d22439675179e43f",
+ "rdilip83/healthpreview06252019-1:latest"
+ ],
+ "sizeBytes": 493887387
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06252019@sha256:6597ff599a78ac452a4138dedb9e08c0ccd3e8b01594b033fd78ba9dbb41fe9e",
+ "rdilip83/healthpreview06252019:latest"
+ ],
+ "sizeBytes": 493887384
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06242019@sha256:c4f565d92086d1ee56e6016178fed5c668352dc0ca0047f02910bdcb87a482c4",
+ "rdilip83/healthpreview06242019:latest"
+ ],
+ "sizeBytes": 493850850
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06212019-1@sha256:937ce5801a0097a1cbc4eff5399c1973b4c6223ece9279b35207368b99f82b96",
+ "rdilip83/healthpreview06212019-1:latest"
+ ],
+ "sizeBytes": 493850674
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06192019@sha256:f92cb5283814d446f0acde6a489648ea197496d5f85b27ca959ec97bce742d8a",
+ "rdilip83/healthpreview06192019:latest"
+ ],
+ "sizeBytes": 493799437
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0f798cb7d56931b231f71e38e7fa5bf898b69e611247a566701f70a5f29a9799",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07092019"
+ ],
+ "sizeBytes": 467692116
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:3734a084fa9681c7e930eb90cad45a8f282c24af63065a720a2327b1683f3ba4",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142019"
+ ],
+ "sizeBytes": 466882569
+ },
+ {
+ "names": [
+ "rdilip83/mergehealth@sha256:16402c34e2d7de72c2ebc18ec8e9f7933fa25f6a7f83bceb84483ba95e3902f7",
+ "rdilip83/mergehealth:v2"
+ ],
+ "sizeBytes": 448931997
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06212019@sha256:5860c9caaf544f2e7c46edad5cdfb69e22398e20dc87cb8a4cd630b5b7000074",
+ "rdilip83/healthpreview06212019:latest"
+ ],
+ "sizeBytes": 448366491
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41",
+ "aksrepos.azurecr.io/prod/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41",
+ "aksrepos.azurecr.io/mirror/hcp-tunnel-front:v1.9.2-v4.0.7",
+ "aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7"
+ ],
+ "sizeBytes": 383483267
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64@sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747",
+ "k8s.gcr.io/kubernetes-dashboard-amd64@sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747",
+ "aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64:v1.10.1",
+ "k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1"
+ ],
+ "sizeBytes": 121711221
+ },
+ {
+ "names": [
+ "nginx@sha256:23b4dcdf0d34d4a129755fc6f52e1c6e23bb34ea011b315d87e193033bcd1b68"
+ ],
+ "sizeBytes": 109331233
+ },
+ {
+ "names": [
+ "nginx@sha256:bdbf36b7f1f77ffe7bd2a32e59235dff6ecf131e3b6b5b96061c652f30685f3a",
+ "nginx:latest"
+ ],
+ "sizeBytes": 109258867
+ },
+ {
+ "names": [
+ "nginx@sha256:b73f527d86e3461fd652f62cf47e7b375196063bbbd503e853af5be16597cb2e",
+ "nginx:1.15.5"
+ ],
+ "sizeBytes": 109083698
+ },
+ {
+ "names": [
+ "debian@sha256:118cf8f3557e1ea766c02f36f05f6ac3e63628427ea8965fb861be904ec35a6f",
+ "debian:latest"
+ ],
+ "sizeBytes": 100594230
+ },
+ {
+ "names": [
+ "nginx@sha256:e3456c851a152494c3e4ff5fcc26f240206abac0c9d794affb40e0714846c451",
+ "nginx:1.7.9"
+ ],
+ "sizeBytes": 91664166
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "aksrepos.azurecr.io/prod/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "deis/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2",
+ "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2"
+ ],
+ "sizeBytes": 82897218
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/heapster-amd64@sha256:fc33c690a3a446de5abc24b048b88050810a58b9e4477fa763a43d7df029301a",
+ "k8s.gcr.io/heapster-amd64@sha256:fc33c690a3a446de5abc24b048b88050810a58b9e4477fa763a43d7df029301a",
+ "aksrepos.azurecr.io/mirror/heapster-amd64:v1.5.3",
+ "k8s.gcr.io/heapster-amd64:v1.5.3"
+ ],
+ "sizeBytes": 75318342
+ },
+ {
+ "names": [
+ "vishiy/hello@sha256:99d60766e39df52d28fe8db9c659633d96ba1d84fd672298dce047d8a86c478a",
+ "vishiy/hello:err100eps"
+ ],
+ "sizeBytes": 54649865
+ },
+ {
+ "names": [
+ "k8s.gcr.io/k8s-dns-kube-dns-amd64@sha256:618a82fa66cf0c75e4753369a6999032372be7308866fc9afb381789b1e5ad52",
+ "k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.13"
+ ],
+ "sizeBytes": 51157394
+ },
+ {
+ "names": [
+ "k8s.gcr.io/metrics-server-amd64@sha256:49a9f12f7067d11f42c803dbe61ed2c1299959ad85cb315b25ff7eef8e6b8892",
+ "k8s.gcr.io/metrics-server-amd64:v0.2.1"
+ ],
+ "sizeBytes": 42541759
+ },
+ {
+ "names": [
+ "k8s.gcr.io/k8s-dns-sidecar-amd64@sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4",
+ "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10"
+ ],
+ "sizeBytes": 41635309
+ },
+ {
+ "names": [
+ "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64@sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8",
+ "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10"
+ ],
+ "sizeBytes": 40372149
+ }
+ ],
+ "nodeInfo": {
+ "architecture": "amd64",
+ "bootID": "d8f6c00f-a085-450e-bf5c-12e651a0fcfc",
+ "containerRuntimeVersion": "docker://3.0.4",
+ "kernelVersion": "4.15.0-1037-azure",
+ "kubeProxyVersion": "v1.11.8",
+ "kubeletVersion": "v1.11.8",
+ "machineID": "cc9ed99e383540a4b0379995bb779221",
+ "operatingSystem": "linux",
+ "osImage": "Ubuntu 16.04.5 LTS",
+ "systemUUID": "301B3B88-C7BD-3D45-A3CB-3CD66A42EB6F"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Node",
+ "metadata": {
+ "annotations": {
+ "node.alpha.kubernetes.io/ttl": "0",
+ "volumes.kubernetes.io/controller-managed-attach-detach": "true"
+ },
+ "creationTimestamp": "2019-03-12T16:40:33Z",
+ "labels": {
+ "agentpool": "nodepool1",
+ "beta.kubernetes.io/arch": "amd64",
+ "beta.kubernetes.io/instance-type": "Standard_DS1_v2",
+ "beta.kubernetes.io/os": "linux",
+ "failure-domain.beta.kubernetes.io/region": "eastus",
+ "failure-domain.beta.kubernetes.io/zone": "1",
+ "kubernetes.azure.com/cluster": "MC_dilipr-health-test_dilipr-health-test_eastus",
+ "kubernetes.io/hostname": "aks-nodepool1-19574989-1",
+ "kubernetes.io/role": "agent",
+ "node-role.kubernetes.io/agent": "",
+ "storageprofile": "managed",
+ "storagetier": "Premium_LRS"
+ },
+ "name": "aks-nodepool1-19574989-1",
+ "resourceVersion": "19068104",
+ "selfLink": "/api/v1/nodes/aks-nodepool1-19574989-1",
+ "uid": "8e1b5c77-44e5-11e9-9920-423525a6b683"
+ },
+ "spec": {
+ "podCIDR": "10.244.0.0/24",
+ "providerID": "azure:///subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/MC_dilipr-health-test_dilipr-health-test_eastus/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-19574989-1"
+ },
+ "status": {
+ "addresses": [
+ {
+ "address": "aks-nodepool1-19574989-1",
+ "type": "Hostname"
+ },
+ {
+ "address": "10.240.0.5",
+ "type": "InternalIP"
+ }
+ ],
+ "allocatable": {
+ "cpu": "940m",
+ "ephemeral-storage": "28043041951",
+ "hugepages-1Gi": "0",
+ "hugepages-2Mi": "0",
+ "memory": "2504708Ki",
+ "pods": "110"
+ },
+ "capacity": {
+ "cpu": "1",
+ "ephemeral-storage": "30428648Ki",
+ "hugepages-1Gi": "0",
+ "hugepages-2Mi": "0",
+ "memory": "3524612Ki",
+ "pods": "110"
+ },
+ "conditions": [
+ {
+ "lastHeartbeatTime": "2019-03-12T16:42:30Z",
+ "lastTransitionTime": "2019-03-12T16:42:30Z",
+ "message": "RouteController created a route",
+ "reason": "RouteCreated",
+ "status": "False",
+ "type": "NetworkUnavailable"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:21Z",
+ "lastTransitionTime": "2019-07-23T14:46:10Z",
+ "message": "kubelet has sufficient disk space available",
+ "reason": "KubeletHasSufficientDisk",
+ "status": "False",
+ "type": "OutOfDisk"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:21Z",
+ "lastTransitionTime": "2019-07-23T14:46:10Z",
+ "message": "kubelet has sufficient memory available",
+ "reason": "KubeletHasSufficientMemory",
+ "status": "False",
+ "type": "MemoryPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:21Z",
+ "lastTransitionTime": "2019-07-23T14:46:10Z",
+ "message": "kubelet has no disk pressure",
+ "reason": "KubeletHasNoDiskPressure",
+ "status": "False",
+ "type": "DiskPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:21Z",
+ "lastTransitionTime": "2019-03-12T16:40:33Z",
+ "message": "kubelet has sufficient PID available",
+ "reason": "KubeletHasSufficientPID",
+ "status": "False",
+ "type": "PIDPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:21Z",
+ "lastTransitionTime": "2019-07-23T14:46:10Z",
+ "message": "kubelet is posting ready status. AppArmor enabled",
+ "reason": "KubeletReady",
+ "status": "True",
+ "type": "Ready"
+ }
+ ],
+ "daemonEndpoints": {
+ "kubeletEndpoint": {
+ "Port": 10250
+ }
+ },
+ "images": [
+ {
+ "names": [
+ "perl@sha256:268e7af9853bcc6d2100e2ad76e928c2ca861518217c269b8a762849a8617c12",
+ "perl:latest"
+ ],
+ "sizeBytes": 890592834
+ },
+ {
+ "names": [
+ "nickchase/rss-php-nginx@sha256:48da56a77fe4ecff4917121365d8e0ce615ebbdfe31f48a996255f5592894e2b",
+ "nickchase/rss-php-nginx:v1"
+ ],
+ "sizeBytes": 677038498
+ },
+ {
+ "names": [
+ "rdilip83/jsonlogger@sha256:82b67ca5e0650cd5e47f5b51659d61cee035e5d8dcd8a79c50358cd2beb3b5a8",
+ "rdilip83/jsonlogger:v12"
+ ],
+ "sizeBytes": 676594134
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "k8s.gcr.io/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "k8s.gcr.io/hyperkube-amd64:v1.11.8"
+ ],
+ "sizeBytes": 615263658
+ },
+ {
+ "names": [
+ "rdilip83/fixrubyerror@sha256:6b7f36cf6258b311015493ab025f06577d758c45bc5010d022ac160b9f40ea5d",
+ "rdilip83/fixrubyerror:latest"
+ ],
+ "sizeBytes": 494068028
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019"
+ ],
+ "sizeBytes": 494067935
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:fb2b90ce9bf7186fd9dfae97f5f72f9b9c80c8a0493af3cff74179cd4ff847c0",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08212019"
+ ],
+ "sizeBytes": 494067572
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c646e180483d295ffac114fb9df513db02553af7879681814d5910764653dd2d",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08202019"
+ ],
+ "sizeBytes": 494067210
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c21b596a22a1338ed293d01681f327acc871ee502ed779ec1109d6a93375bb3b",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08192019"
+ ],
+ "sizeBytes": 494055088
+ },
+ {
+ "names": [
+ "rdilip83/cifeatureprod08192019@sha256:7815bba9a805e4e8df33356fd532671de45525ce9c6e936e14f9b126e2097ecd",
+ "rdilip83/cifeatureprod08192019:v1"
+ ],
+ "sizeBytes": 494055088
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:59e34aab9f6e16a87e880b1ee1c9dd5434ee40dd29502e74aceefabf51443717",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:internaltesthealth08192019"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:6387d0dedf4de0bab430f681ef61361f63a20e1c4c287a9b60ea5460283ac6cf",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ci_feature_prod_health08192019"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "rdilip83/hc08192019@sha256:014d936771508d499ac4c15043e23b16bce8de0019fb2048b99540cbe9084895",
+ "rdilip83/hc08192019:1"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "rdilip83/health-rc@sha256:8ad12bce5ffd27b301bc6fe4355c8affa6fce080ae7e2291dec3a0ed11bb9483",
+ "rdilip83/health-rc:3"
+ ],
+ "sizeBytes": 494052863
+ },
+ {
+ "names": [
+ "rdilip83/health_ci_feature_image@sha256:1a574d25884483083e8cbaacbf0cb7c4e442dc736d480615c65f5c71f8969b13",
+ "rdilip83/health_ci_feature_image:v1"
+ ],
+ "sizeBytes": 494052147
+ },
+ {
+ "names": [
+ "rdilip83/healthrc@sha256:816c8cef09822daf050a0fca6f92e7ac19147ff4bf1a823d43fe70f73470cc0c",
+ "rdilip83/healthrc:v3"
+ ],
+ "sizeBytes": 494052138
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:d35aac044d1adc3d02269fde78f8dfd923db94b81288447cf6fdd482970a333b",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthmerge08142019"
+ ],
+ "sizeBytes": 494052135
+ },
+ {
+ "names": [
+ "rdilip83/healthrc@sha256:a130780e56ac0edb3ca29477e12edd5e9b5d08b5732dbd59ede9beb58e21eca7",
+ "rdilip83/healthrc:v2"
+ ],
+ "sizeBytes": 494051682
+ },
+ {
+ "names": [
+ "rdilip83/healthmerge@sha256:24d270b0f59fb484c283922474736c3cba50f8aad0270bc0a3acd14284694eea",
+ "rdilip83/healthmerge:v8"
+ ],
+ "sizeBytes": 494010139
+ },
+ {
+ "names": [
+ "rdilip83/health-rc@sha256:b1d24728eb808d301da426b76b7f7b79606204c4c2b695a24ac670be8276d55d",
+ "rdilip83/health-rc:1"
+ ],
+ "sizeBytes": 494000891
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:a0666957cccbfdf5784accd1133408bf017c28a6e694d9a2ae74da94eef2d285",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview08012019"
+ ],
+ "sizeBytes": 493994261
+ },
+ {
+ "names": [
+ "rdilip83/mergehealth@sha256:32c9b35a6809c54d5296e2ca2b122b35a4ad8c852622174cc5a9f92cc27e56e4",
+ "rdilip83/mergehealth:v3"
+ ],
+ "sizeBytes": 493988815
+ },
+ {
+ "names": [
+ "rdilip83/mergehealth@sha256:a3521e8f36e007b3cb949e0356a75394ac61fd2024ca1ec4827b8d54fb068534",
+ "rdilip83/mergehealth:v1"
+ ],
+ "sizeBytes": 493981585
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0438e4690e042b195917e160b8949aeb339520ee19c898a8bb9452f36d1f84f1",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview07182019"
+ ],
+ "sizeBytes": 493977357
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:9ebc410a36856176921dba81b5bd43132469209b315f52be346690435419b9bb"
+ ],
+ "sizeBytes": 493946790
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:4e51195a9c77bd166fc90ee5f6143a4604b502ab7ef0f06431dec10c341b10f3",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview06272019"
+ ],
+ "sizeBytes": 493893635
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06272019@sha256:d888ba5ff5e5810113a32f9c9812a5e28088cc81b902e95a185fe465a514029c",
+ "rdilip83/healthpreview06272019:latest"
+ ],
+ "sizeBytes": 493893633
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06252019-1@sha256:1561876cffe94433a569f29f5231548e039193ebaa7ec640d22439675179e43f",
+ "rdilip83/healthpreview06252019-1:latest"
+ ],
+ "sizeBytes": 493887387
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06252019@sha256:6597ff599a78ac452a4138dedb9e08c0ccd3e8b01594b033fd78ba9dbb41fe9e",
+ "rdilip83/healthpreview06252019:latest"
+ ],
+ "sizeBytes": 493887384
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06242019@sha256:c4f565d92086d1ee56e6016178fed5c668352dc0ca0047f02910bdcb87a482c4",
+ "rdilip83/healthpreview06242019:latest"
+ ],
+ "sizeBytes": 493850850
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06212019-1@sha256:937ce5801a0097a1cbc4eff5399c1973b4c6223ece9279b35207368b99f82b96",
+ "rdilip83/healthpreview06212019-1:latest"
+ ],
+ "sizeBytes": 493850674
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0f798cb7d56931b231f71e38e7fa5bf898b69e611247a566701f70a5f29a9799",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07092019"
+ ],
+ "sizeBytes": 467692116
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:3734a084fa9681c7e930eb90cad45a8f282c24af63065a720a2327b1683f3ba4",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142019"
+ ],
+ "sizeBytes": 466882569
+ },
+ {
+ "names": [
+ "rdilip83/mergehealth@sha256:16402c34e2d7de72c2ebc18ec8e9f7933fa25f6a7f83bceb84483ba95e3902f7",
+ "rdilip83/mergehealth:v2"
+ ],
+ "sizeBytes": 448931997
+ },
+ {
+ "names": [
+ "deis/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41",
+ "deis/hcp-tunnel-front:v1.9.2-v4.0.7"
+ ],
+ "sizeBytes": 383483267
+ },
+ {
+ "names": [
+ "nginx@sha256:23b4dcdf0d34d4a129755fc6f52e1c6e23bb34ea011b315d87e193033bcd1b68"
+ ],
+ "sizeBytes": 109331233
+ },
+ {
+ "names": [
+ "nginx@sha256:bdbf36b7f1f77ffe7bd2a32e59235dff6ecf131e3b6b5b96061c652f30685f3a",
+ "nginx:latest"
+ ],
+ "sizeBytes": 109258867
+ },
+ {
+ "names": [
+ "debian@sha256:118cf8f3557e1ea766c02f36f05f6ac3e63628427ea8965fb861be904ec35a6f",
+ "debian:latest"
+ ],
+ "sizeBytes": 100594230
+ },
+ {
+ "names": [
+ "nginx@sha256:e3456c851a152494c3e4ff5fcc26f240206abac0c9d794affb40e0714846c451",
+ "nginx:1.7.9"
+ ],
+ "sizeBytes": 91664166
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "aksrepos.azurecr.io/prod/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "deis/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2",
+ "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2"
+ ],
+ "sizeBytes": 82897218
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/heapster-amd64@sha256:fc33c690a3a446de5abc24b048b88050810a58b9e4477fa763a43d7df029301a",
+ "k8s.gcr.io/heapster-amd64@sha256:fc33c690a3a446de5abc24b048b88050810a58b9e4477fa763a43d7df029301a",
+ "aksrepos.azurecr.io/mirror/heapster-amd64:v1.5.3",
+ "k8s.gcr.io/heapster-amd64:v1.5.3"
+ ],
+ "sizeBytes": 75318342
+ },
+ {
+ "names": [
+ "vishiy/hello@sha256:99d60766e39df52d28fe8db9c659633d96ba1d84fd672298dce047d8a86c478a",
+ "vishiy/hello:err100eps"
+ ],
+ "sizeBytes": 54649865
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64@sha256:618a82fa66cf0c75e4753369a6999032372be7308866fc9afb381789b1e5ad52",
+ "k8s.gcr.io/k8s-dns-kube-dns-amd64@sha256:618a82fa66cf0c75e4753369a6999032372be7308866fc9afb381789b1e5ad52",
+ "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13",
+ "k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.13"
+ ],
+ "sizeBytes": 51157394
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-proportional-autoscaler-amd64@sha256:003f98d9f411ddfa6ff6d539196355e03ddd69fa4ed38c7ffb8fec6f729afe2d",
+ "k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2"
+ ],
+ "sizeBytes": 49648481
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/metrics-server-amd64@sha256:220c0ed3451cb95e4b2f72dd5dc8d9d39d9f529722e5b29d8286373ce27b117e",
+ "k8s.gcr.io/metrics-server-amd64@sha256:49a9f12f7067d11f42c803dbe61ed2c1299959ad85cb315b25ff7eef8e6b8892",
+ "aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1",
+ "k8s.gcr.io/metrics-server-amd64:v0.2.1"
+ ],
+ "sizeBytes": 42541759
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64@sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4",
+ "k8s.gcr.io/k8s-dns-sidecar-amd64@sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4",
+ "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10",
+ "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.10"
+ ],
+ "sizeBytes": 41635309
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64@sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8",
+ "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64@sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8",
+ "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
+ "k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.10"
+ ],
+ "sizeBytes": 40372149
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/addon-resizer@sha256:8ac3ffa4232046feb297cefc40734641fa2954c16308f9e0d70ec152f22231ca",
+ "k8s.gcr.io/addon-resizer@sha256:507aa9845ecce1fdde4d61f530c802f4dc2974c700ce0db7730866e442db958d",
+ "aksrepos.azurecr.io/mirror/addon-resizer:1.8.1",
+ "k8s.gcr.io/addon-resizer:1.8.1"
+ ],
+ "sizeBytes": 32968591
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/nginx@sha256:91d22184f3f9b1be658c2cc2c12d324de7ff12c8b9c9a597905457b4d93b069d",
+ "nginx@sha256:9d46fd628d54ebe1633ee3cf0fe2acfcc419cfae541c63056530e39cd5620366",
+ "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine",
+ "nginx:1.13.12-alpine"
+ ],
+ "sizeBytes": 18002931
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/exechealthz-amd64@sha256:34722333f0cd0b891b61c9e0efa31913f22157e341a3aabb79967305d4e78260",
+ "k8s.gcr.io/exechealthz-amd64@sha256:503e158c3f65ed7399f54010571c7c977ade7fe59010695f48d9650d83488c0a",
+ "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2",
+ "k8s.gcr.io/exechealthz-amd64:1.2"
+ ],
+ "sizeBytes": 8374840
+ }
+ ],
+ "nodeInfo": {
+ "architecture": "amd64",
+ "bootID": "4c822e6d-c2e5-4697-9a01-467e04804fc1",
+ "containerRuntimeVersion": "docker://3.0.4",
+ "kernelVersion": "4.15.0-1037-azure",
+ "kubeProxyVersion": "v1.11.8",
+ "kubeletVersion": "v1.11.8",
+ "machineID": "1954026de5e6436788f214eb0dfd6a13",
+ "operatingSystem": "linux",
+ "osImage": "Ubuntu 16.04.5 LTS",
+ "systemUUID": "17A6A78E-D3E2-2A4F-852B-C91D933C8D5B"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Node",
+ "metadata": {
+ "annotations": {
+ "node.alpha.kubernetes.io/ttl": "0",
+ "volumes.kubernetes.io/controller-managed-attach-detach": "true"
+ },
+ "creationTimestamp": "2019-06-21T02:01:53Z",
+ "labels": {
+ "agentpool": "nodepool1",
+ "beta.kubernetes.io/arch": "amd64",
+ "beta.kubernetes.io/instance-type": "Standard_DS1_v2",
+ "beta.kubernetes.io/os": "linux",
+ "failure-domain.beta.kubernetes.io/region": "eastus",
+ "failure-domain.beta.kubernetes.io/zone": "0",
+ "kubernetes.azure.com/cluster": "MC_dilipr-health-test_dilipr-health-test_eastus",
+ "kubernetes.io/hostname": "aks-nodepool1-19574989-2",
+ "kubernetes.io/role": "agent",
+ "node-role.kubernetes.io/agent": "",
+ "storageprofile": "managed",
+ "storagetier": "Premium_LRS"
+ },
+ "name": "aks-nodepool1-19574989-2",
+ "resourceVersion": "19068101",
+ "selfLink": "/api/v1/nodes/aks-nodepool1-19574989-2",
+ "uid": "8a62e1bc-93c8-11e9-854d-ee76584a3c00"
+ },
+ "spec": {
+ "podCIDR": "10.244.12.0/24",
+ "providerID": "azure:///subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/MC_dilipr-health-test_dilipr-health-test_eastus/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-19574989-2"
+ },
+ "status": {
+ "addresses": [
+ {
+ "address": "aks-nodepool1-19574989-2",
+ "type": "Hostname"
+ },
+ {
+ "address": "10.240.0.7",
+ "type": "InternalIP"
+ }
+ ],
+ "allocatable": {
+ "cpu": "940m",
+ "ephemeral-storage": "28043041951",
+ "hugepages-1Gi": "0",
+ "hugepages-2Mi": "0",
+ "memory": "2480548Ki",
+ "pods": "110"
+ },
+ "capacity": {
+ "cpu": "1",
+ "ephemeral-storage": "30428648Ki",
+ "hugepages-1Gi": "0",
+ "hugepages-2Mi": "0",
+ "memory": "3500452Ki",
+ "pods": "110"
+ },
+ "conditions": [
+ {
+ "lastHeartbeatTime": "2019-06-21T02:02:24Z",
+ "lastTransitionTime": "2019-06-21T02:02:24Z",
+ "message": "RouteController created a route",
+ "reason": "RouteCreated",
+ "status": "False",
+ "type": "NetworkUnavailable"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:20Z",
+ "lastTransitionTime": "2019-07-23T14:46:10Z",
+ "message": "kubelet has sufficient disk space available",
+ "reason": "KubeletHasSufficientDisk",
+ "status": "False",
+ "type": "OutOfDisk"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:20Z",
+ "lastTransitionTime": "2019-07-23T14:46:10Z",
+ "message": "kubelet has sufficient memory available",
+ "reason": "KubeletHasSufficientMemory",
+ "status": "False",
+ "type": "MemoryPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:20Z",
+ "lastTransitionTime": "2019-07-23T14:46:10Z",
+ "message": "kubelet has no disk pressure",
+ "reason": "KubeletHasNoDiskPressure",
+ "status": "False",
+ "type": "DiskPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:20Z",
+ "lastTransitionTime": "2019-06-21T02:01:53Z",
+ "message": "kubelet has sufficient PID available",
+ "reason": "KubeletHasSufficientPID",
+ "status": "False",
+ "type": "PIDPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:20Z",
+ "lastTransitionTime": "2019-07-23T14:46:10Z",
+ "message": "kubelet is posting ready status. AppArmor enabled",
+ "reason": "KubeletReady",
+ "status": "True",
+ "type": "Ready"
+ }
+ ],
+ "daemonEndpoints": {
+ "kubeletEndpoint": {
+ "Port": 10250
+ }
+ },
+ "images": [
+ {
+ "names": [
+ "nickchase/rss-php-nginx@sha256:48da56a77fe4ecff4917121365d8e0ce615ebbdfe31f48a996255f5592894e2b",
+ "nickchase/rss-php-nginx:v1"
+ ],
+ "sizeBytes": 677038498
+ },
+ {
+ "names": [
+ "rdilip83/jsonlogger@sha256:82b67ca5e0650cd5e47f5b51659d61cee035e5d8dcd8a79c50358cd2beb3b5a8",
+ "rdilip83/jsonlogger:v12"
+ ],
+ "sizeBytes": 676594134
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "k8s.gcr.io/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "k8s.gcr.io/hyperkube-amd64:v1.11.8"
+ ],
+ "sizeBytes": 615263658
+ },
+ {
+ "names": [
+ "rdilip83/fixrubyerror@sha256:6b7f36cf6258b311015493ab025f06577d758c45bc5010d022ac160b9f40ea5d",
+ "rdilip83/fixrubyerror:latest"
+ ],
+ "sizeBytes": 494068028
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019"
+ ],
+ "sizeBytes": 494067935
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:fb2b90ce9bf7186fd9dfae97f5f72f9b9c80c8a0493af3cff74179cd4ff847c0",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08212019"
+ ],
+ "sizeBytes": 494067572
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c646e180483d295ffac114fb9df513db02553af7879681814d5910764653dd2d",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08202019"
+ ],
+ "sizeBytes": 494067210
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c21b596a22a1338ed293d01681f327acc871ee502ed779ec1109d6a93375bb3b",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08192019"
+ ],
+ "sizeBytes": 494055088
+ },
+ {
+ "names": [
+ "rdilip83/cifeatureprod08192019@sha256:7815bba9a805e4e8df33356fd532671de45525ce9c6e936e14f9b126e2097ecd",
+ "rdilip83/cifeatureprod08192019:v1"
+ ],
+ "sizeBytes": 494055088
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:6387d0dedf4de0bab430f681ef61361f63a20e1c4c287a9b60ea5460283ac6cf",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ci_feature_prod_health08192019"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:59e34aab9f6e16a87e880b1ee1c9dd5434ee40dd29502e74aceefabf51443717",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:internaltesthealth08192019"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "rdilip83/hc08192019@sha256:014d936771508d499ac4c15043e23b16bce8de0019fb2048b99540cbe9084895",
+ "rdilip83/hc08192019:1"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "rdilip83/health-rc@sha256:8ad12bce5ffd27b301bc6fe4355c8affa6fce080ae7e2291dec3a0ed11bb9483",
+ "rdilip83/health-rc:3"
+ ],
+ "sizeBytes": 494052863
+ },
+ {
+ "names": [
+ "rdilip83/health_ci_feature_image@sha256:1a574d25884483083e8cbaacbf0cb7c4e442dc736d480615c65f5c71f8969b13",
+ "rdilip83/health_ci_feature_image:v1"
+ ],
+ "sizeBytes": 494052147
+ },
+ {
+ "names": [
+ "rdilip83/healthrc@sha256:816c8cef09822daf050a0fca6f92e7ac19147ff4bf1a823d43fe70f73470cc0c",
+ "rdilip83/healthrc:v3"
+ ],
+ "sizeBytes": 494052138
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:d35aac044d1adc3d02269fde78f8dfd923db94b81288447cf6fdd482970a333b",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthmerge08142019"
+ ],
+ "sizeBytes": 494052135
+ },
+ {
+ "names": [
+ "rdilip83/healthrc@sha256:a130780e56ac0edb3ca29477e12edd5e9b5d08b5732dbd59ede9beb58e21eca7",
+ "rdilip83/healthrc:v2"
+ ],
+ "sizeBytes": 494051682
+ },
+ {
+ "names": [
+ "rdilip83/healthmerge@sha256:24d270b0f59fb484c283922474736c3cba50f8aad0270bc0a3acd14284694eea",
+ "rdilip83/healthmerge:v8"
+ ],
+ "sizeBytes": 494010139
+ },
+ {
+ "names": [
+ "rdilip83/health-rc@sha256:b1d24728eb808d301da426b76b7f7b79606204c4c2b695a24ac670be8276d55d",
+ "rdilip83/health-rc:1"
+ ],
+ "sizeBytes": 494000891
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:a0666957cccbfdf5784accd1133408bf017c28a6e694d9a2ae74da94eef2d285",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview08012019"
+ ],
+ "sizeBytes": 493994261
+ },
+ {
+ "names": [
+ "rdilip83/mergehealth@sha256:32c9b35a6809c54d5296e2ca2b122b35a4ad8c852622174cc5a9f92cc27e56e4",
+ "rdilip83/mergehealth:v3"
+ ],
+ "sizeBytes": 493988815
+ },
+ {
+ "names": [
+ "rdilip83/mergehealth@sha256:a3521e8f36e007b3cb949e0356a75394ac61fd2024ca1ec4827b8d54fb068534",
+ "rdilip83/mergehealth:v1"
+ ],
+ "sizeBytes": 493981585
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0438e4690e042b195917e160b8949aeb339520ee19c898a8bb9452f36d1f84f1",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview07182019"
+ ],
+ "sizeBytes": 493977357
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:9ebc410a36856176921dba81b5bd43132469209b315f52be346690435419b9bb"
+ ],
+ "sizeBytes": 493946790
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:4e51195a9c77bd166fc90ee5f6143a4604b502ab7ef0f06431dec10c341b10f3",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview06272019"
+ ],
+ "sizeBytes": 493893635
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06272019@sha256:d888ba5ff5e5810113a32f9c9812a5e28088cc81b902e95a185fe465a514029c",
+ "rdilip83/healthpreview06272019:latest"
+ ],
+ "sizeBytes": 493893633
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06252019-1@sha256:1561876cffe94433a569f29f5231548e039193ebaa7ec640d22439675179e43f",
+ "rdilip83/healthpreview06252019-1:latest"
+ ],
+ "sizeBytes": 493887387
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06252019@sha256:6597ff599a78ac452a4138dedb9e08c0ccd3e8b01594b033fd78ba9dbb41fe9e",
+ "rdilip83/healthpreview06252019:latest"
+ ],
+ "sizeBytes": 493887384
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06242019@sha256:c4f565d92086d1ee56e6016178fed5c668352dc0ca0047f02910bdcb87a482c4",
+ "rdilip83/healthpreview06242019:latest"
+ ],
+ "sizeBytes": 493850850
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06212019-1@sha256:937ce5801a0097a1cbc4eff5399c1973b4c6223ece9279b35207368b99f82b96",
+ "rdilip83/healthpreview06212019-1:latest"
+ ],
+ "sizeBytes": 493850674
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06192019@sha256:f92cb5283814d446f0acde6a489648ea197496d5f85b27ca959ec97bce742d8a",
+ "rdilip83/healthpreview06192019:latest"
+ ],
+ "sizeBytes": 493799437
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0f798cb7d56931b231f71e38e7fa5bf898b69e611247a566701f70a5f29a9799",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod07092019"
+ ],
+ "sizeBytes": 467692116
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:3734a084fa9681c7e930eb90cad45a8f282c24af63065a720a2327b1683f3ba4",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod06142019"
+ ],
+ "sizeBytes": 466882569
+ },
+ {
+ "names": [
+ "rdilip83/mergehealth@sha256:16402c34e2d7de72c2ebc18ec8e9f7933fa25f6a7f83bceb84483ba95e3902f7",
+ "rdilip83/mergehealth:v2"
+ ],
+ "sizeBytes": 448931997
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06212019@sha256:5860c9caaf544f2e7c46edad5cdfb69e22398e20dc87cb8a4cd630b5b7000074",
+ "rdilip83/healthpreview06212019:latest"
+ ],
+ "sizeBytes": 448366491
+ },
+ {
+ "names": [
+ "deis/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41",
+ "deis/hcp-tunnel-front:v1.9.2-v4.0.7"
+ ],
+ "sizeBytes": 383483267
+ },
+ {
+ "names": [
+ "progrium/stress@sha256:e34d56d60f5caae79333cee395aae93b74791d50e3841986420d23c2ee4697bf",
+ "progrium/stress:latest"
+ ],
+ "sizeBytes": 281783943
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:b6834bb69e8fad88110b1dc57097a45bc79e6f2c5f2c2773c871d07389794771",
+ "k8s.gcr.io/cluster-autoscaler:v1.12.3"
+ ],
+ "sizeBytes": 232229241
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:dc5744fd8c22aebfe40d6b62ab97d18d7bfbfc7ab1782509d69a5a9ec514df2c",
+ "k8s.gcr.io/cluster-autoscaler:v1.12.2"
+ ],
+ "sizeBytes": 232167833
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:e71851267764a068fbb091a4ef3bb874b5ce34db48cb757fcf77779f30ef0207",
+ "k8s.gcr.io/cluster-autoscaler:v1.3.7"
+ ],
+ "sizeBytes": 217353965
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:36a369ca4643542d501bce0addf8b903f2141ae9e2608662b77a3d24f01d7780",
+ "k8s.gcr.io/cluster-autoscaler:v1.2.2"
+ ],
+ "sizeBytes": 208688449
+ },
+ {
+ "names": [
+ "containernetworking/azure-npm@sha256:4735da6dc0d5393d68be72498f5ce563cb930fa21b26faec8fdc844001057a56",
+ "containernetworking/azure-npm:v1.0.18"
+ ],
+ "sizeBytes": 170727162
+ },
+ {
+ "names": [
+ "containernetworking/networkmonitor@sha256:d875511410502c3e37804e1f313cc2b0a03d7a03d3d5e6adaf8994b753a76f8e",
+ "containernetworking/networkmonitor:v0.0.6"
+ ],
+ "sizeBytes": 123663837
+ },
+ {
+ "names": [
+ "containernetworking/networkmonitor@sha256:944408a497c451b0e79d2596dc2e9fe5036cdbba7fa831bff024e1c9ed44190d",
+ "containernetworking/networkmonitor:v0.0.5"
+ ],
+ "sizeBytes": 122043325
+ },
+ {
+ "names": [
+ "nginx@sha256:bdbf36b7f1f77ffe7bd2a32e59235dff6ecf131e3b6b5b96061c652f30685f3a",
+ "nginx:latest"
+ ],
+ "sizeBytes": 109258867
+ },
+ {
+ "names": [
+ "debian@sha256:118cf8f3557e1ea766c02f36f05f6ac3e63628427ea8965fb861be904ec35a6f",
+ "debian:latest"
+ ],
+ "sizeBytes": 100594230
+ },
+ {
+ "names": [
+ "k8s.gcr.io/kube-addon-manager-amd64@sha256:3da3f17cd4f02fe5696f29a5e6cd4aef7111f20dab9bec54ea35942346cfeb60",
+ "k8s.gcr.io/kube-addon-manager-amd64:v8.8"
+ ],
+ "sizeBytes": 99631084
+ },
+ {
+ "names": [
+ "k8s.gcr.io/kube-addon-manager-amd64@sha256:672794ee3582521eb8bc4f257d0f70c92893f1989f39a200f9c84bcfe1aea7c9",
+ "k8s.gcr.io/kube-addon-manager-amd64:v9.0"
+ ],
+ "sizeBytes": 83077558
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "aksrepos.azurecr.io/prod/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "deis/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2",
+ "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2"
+ ],
+ "sizeBytes": 82897218
+ },
+ {
+ "names": [
+ "k8s.gcr.io/heapster-amd64@sha256:dccaabb0c20cf05c29baefa1e9bf0358b083ccc0fab492b9b3b47fb7e4db5472",
+ "k8s.gcr.io/heapster-amd64:v1.5.4"
+ ],
+ "sizeBytes": 75318342
+ }
+ ],
+ "nodeInfo": {
+ "architecture": "amd64",
+ "bootID": "ee529550-afa8-43bb-90a6-f157e7e22e18",
+ "containerRuntimeVersion": "docker://3.0.4",
+ "kernelVersion": "4.15.0-1045-azure",
+ "kubeProxyVersion": "v1.11.8",
+ "kubeletVersion": "v1.11.8",
+ "machineID": "0e5d932888da4e17a3c58210f6c8c9db",
+ "operatingSystem": "linux",
+ "osImage": "Ubuntu 16.04.6 LTS",
+ "systemUUID": "5DBFC273-947F-0140-AD1F-BF6758D30B37"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Node",
+ "metadata": {
+ "annotations": {
+ "node.alpha.kubernetes.io/ttl": "0",
+ "volumes.kubernetes.io/controller-managed-attach-detach": "true"
+ },
+ "creationTimestamp": "2019-08-07T18:57:56Z",
+ "labels": {
+ "agentpool": "nodepool1",
+ "beta.kubernetes.io/arch": "amd64",
+ "beta.kubernetes.io/instance-type": "Standard_DS1_v2",
+ "beta.kubernetes.io/os": "linux",
+ "failure-domain.beta.kubernetes.io/region": "eastus",
+ "failure-domain.beta.kubernetes.io/zone": "1",
+ "kubernetes.azure.com/cluster": "MC_dilipr-health-test_dilipr-health-test_eastus",
+ "kubernetes.io/hostname": "aks-nodepool1-19574989-3",
+ "kubernetes.io/role": "agent",
+ "node-role.kubernetes.io/agent": "",
+ "storageprofile": "managed",
+ "storagetier": "Premium_LRS"
+ },
+ "name": "aks-nodepool1-19574989-3",
+ "resourceVersion": "19068105",
+ "selfLink": "/api/v1/nodes/aks-nodepool1-19574989-3",
+ "uid": "448ea0a7-b945-11e9-a1b6-127094e7fd94"
+ },
+ "spec": {
+ "podCIDR": "10.244.2.0/24",
+ "providerID": "azure:///subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourceGroups/MC_dilipr-health-test_dilipr-health-test_eastus/providers/Microsoft.Compute/virtualMachines/aks-nodepool1-19574989-3"
+ },
+ "status": {
+ "addresses": [
+ {
+ "address": "aks-nodepool1-19574989-3",
+ "type": "Hostname"
+ },
+ {
+ "address": "10.240.0.6",
+ "type": "InternalIP"
+ }
+ ],
+ "allocatable": {
+ "cpu": "940m",
+ "ephemeral-storage": "28043041951",
+ "hugepages-1Gi": "0",
+ "hugepages-2Mi": "0",
+ "memory": "2480544Ki",
+ "pods": "110"
+ },
+ "capacity": {
+ "cpu": "1",
+ "ephemeral-storage": "30428648Ki",
+ "hugepages-1Gi": "0",
+ "hugepages-2Mi": "0",
+ "memory": "3500448Ki",
+ "pods": "110"
+ },
+ "conditions": [
+ {
+ "lastHeartbeatTime": "2019-08-07T18:59:32Z",
+ "lastTransitionTime": "2019-08-07T18:59:32Z",
+ "message": "RouteController created a route",
+ "reason": "RouteCreated",
+ "status": "False",
+ "type": "NetworkUnavailable"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:22Z",
+ "lastTransitionTime": "2019-08-07T18:57:56Z",
+ "message": "kubelet has sufficient disk space available",
+ "reason": "KubeletHasSufficientDisk",
+ "status": "False",
+ "type": "OutOfDisk"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:22Z",
+ "lastTransitionTime": "2019-08-07T18:57:56Z",
+ "message": "kubelet has sufficient memory available",
+ "reason": "KubeletHasSufficientMemory",
+ "status": "False",
+ "type": "MemoryPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:22Z",
+ "lastTransitionTime": "2019-08-07T18:57:56Z",
+ "message": "kubelet has no disk pressure",
+ "reason": "KubeletHasNoDiskPressure",
+ "status": "False",
+ "type": "DiskPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:22Z",
+ "lastTransitionTime": "2019-08-07T18:57:56Z",
+ "message": "kubelet has sufficient PID available",
+ "reason": "KubeletHasSufficientPID",
+ "status": "False",
+ "type": "PIDPressure"
+ },
+ {
+ "lastHeartbeatTime": "2019-08-23T20:43:22Z",
+ "lastTransitionTime": "2019-08-07T18:58:06Z",
+ "message": "kubelet is posting ready status. AppArmor enabled",
+ "reason": "KubeletReady",
+ "status": "True",
+ "type": "Ready"
+ }
+ ],
+ "daemonEndpoints": {
+ "kubeletEndpoint": {
+ "Port": 10250
+ }
+ },
+ "images": [
+ {
+ "names": [
+ "deis/hcp-tunnel-front@sha256:a067679f0ab376197a344cd410821cf07d69fc322dcd9af4a9229250da725ce2",
+ "deis/hcp-tunnel-front:v1.9.2-v4.0.4"
+ ],
+ "sizeBytes": 640504769
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "k8s.gcr.io/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "k8s.gcr.io/hyperkube-amd64:v1.11.8"
+ ],
+ "sizeBytes": 615263658
+ },
+ {
+ "names": [
+ "rdilip83/fixrubyerror@sha256:6b7f36cf6258b311015493ab025f06577d758c45bc5010d022ac160b9f40ea5d",
+ "rdilip83/fixrubyerror:latest"
+ ],
+ "sizeBytes": 494068028
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019"
+ ],
+ "sizeBytes": 494067935
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:fb2b90ce9bf7186fd9dfae97f5f72f9b9c80c8a0493af3cff74179cd4ff847c0",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08212019"
+ ],
+ "sizeBytes": 494067572
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c646e180483d295ffac114fb9df513db02553af7879681814d5910764653dd2d",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08202019"
+ ],
+ "sizeBytes": 494067210
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:c21b596a22a1338ed293d01681f327acc871ee502ed779ec1109d6a93375bb3b",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08192019"
+ ],
+ "sizeBytes": 494055088
+ },
+ {
+ "names": [
+ "rdilip83/cifeatureprod08192019@sha256:7815bba9a805e4e8df33356fd532671de45525ce9c6e936e14f9b126e2097ecd",
+ "rdilip83/cifeatureprod08192019:v1"
+ ],
+ "sizeBytes": 494055088
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:6387d0dedf4de0bab430f681ef61361f63a20e1c4c287a9b60ea5460283ac6cf",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ci_feature_prod_health08192019"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "rdilip83/hc08192019@sha256:014d936771508d499ac4c15043e23b16bce8de0019fb2048b99540cbe9084895",
+ "rdilip83/hc08192019:1"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:59e34aab9f6e16a87e880b1ee1c9dd5434ee40dd29502e74aceefabf51443717",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:internaltesthealth08192019"
+ ],
+ "sizeBytes": 494053562
+ },
+ {
+ "names": [
+ "rdilip83/health-rc@sha256:8ad12bce5ffd27b301bc6fe4355c8affa6fce080ae7e2291dec3a0ed11bb9483",
+ "rdilip83/health-rc:3"
+ ],
+ "sizeBytes": 494052863
+ },
+ {
+ "names": [
+ "rdilip83/health_ci_feature_image@sha256:1a574d25884483083e8cbaacbf0cb7c4e442dc736d480615c65f5c71f8969b13",
+ "rdilip83/health_ci_feature_image:v1"
+ ],
+ "sizeBytes": 494052147
+ },
+ {
+ "names": [
+ "rdilip83/healthrc@sha256:816c8cef09822daf050a0fca6f92e7ac19147ff4bf1a823d43fe70f73470cc0c",
+ "rdilip83/healthrc:v3"
+ ],
+ "sizeBytes": 494052138
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:d35aac044d1adc3d02269fde78f8dfd923db94b81288447cf6fdd482970a333b",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthmerge08142019"
+ ],
+ "sizeBytes": 494052135
+ },
+ {
+ "names": [
+ "rdilip83/healthrc@sha256:a130780e56ac0edb3ca29477e12edd5e9b5d08b5732dbd59ede9beb58e21eca7",
+ "rdilip83/healthrc:v2"
+ ],
+ "sizeBytes": 494051682
+ },
+ {
+ "names": [
+ "rdilip83/healthmerge@sha256:24d270b0f59fb484c283922474736c3cba50f8aad0270bc0a3acd14284694eea",
+ "rdilip83/healthmerge:v8"
+ ],
+ "sizeBytes": 494010139
+ },
+ {
+ "names": [
+ "rdilip83/health-rc@sha256:b1d24728eb808d301da426b76b7f7b79606204c4c2b695a24ac670be8276d55d",
+ "rdilip83/health-rc:1"
+ ],
+ "sizeBytes": 494000891
+ },
+ {
+ "names": [
+ "rdilip83/mergehealth@sha256:32c9b35a6809c54d5296e2ca2b122b35a4ad8c852622174cc5a9f92cc27e56e4",
+ "rdilip83/mergehealth:v3"
+ ],
+ "sizeBytes": 493988815
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:0438e4690e042b195917e160b8949aeb339520ee19c898a8bb9452f36d1f84f1",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview07182019"
+ ],
+ "sizeBytes": 493977357
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:4e51195a9c77bd166fc90ee5f6143a4604b502ab7ef0f06431dec10c341b10f3",
+ "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:healthpreview06272019"
+ ],
+ "sizeBytes": 493893635
+ },
+ {
+ "names": [
+ "rdilip83/healthpreview06272019@sha256:d888ba5ff5e5810113a32f9c9812a5e28088cc81b902e95a185fe465a514029c",
+ "rdilip83/healthpreview06272019:latest"
+ ],
+ "sizeBytes": 493893633
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/prod/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41",
+ "aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7"
+ ],
+ "sizeBytes": 383483267
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:b6834bb69e8fad88110b1dc57097a45bc79e6f2c5f2c2773c871d07389794771",
+ "k8s.gcr.io/cluster-autoscaler:v1.12.3"
+ ],
+ "sizeBytes": 232229241
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:dc5744fd8c22aebfe40d6b62ab97d18d7bfbfc7ab1782509d69a5a9ec514df2c",
+ "k8s.gcr.io/cluster-autoscaler:v1.12.2"
+ ],
+ "sizeBytes": 232167833
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:193eaf37788dd5f971dd400b7e3d28e650bfd81c90fa46b234f03eb3d43880e3",
+ "k8s.gcr.io/cluster-autoscaler:v1.12.5"
+ ],
+ "sizeBytes": 231543459
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:e71851267764a068fbb091a4ef3bb874b5ce34db48cb757fcf77779f30ef0207",
+ "k8s.gcr.io/cluster-autoscaler:v1.3.7"
+ ],
+ "sizeBytes": 217353965
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:156b7b9bcba24ed474f67d0feaf27f2506013f15b030341bbd41c630283161b8",
+ "k8s.gcr.io/cluster-autoscaler:v1.3.4"
+ ],
+ "sizeBytes": 217264129
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:97896235bf66bde573d6f2ee150e212ea7010d314eb5d2cfb2ff1af93335db30",
+ "k8s.gcr.io/cluster-autoscaler:v1.3.3"
+ ],
+ "sizeBytes": 217259793
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:b416bf3b6687788b4da4c7ede2bcf067b34ad781862ee3d3dac1d720c5fa38b3",
+ "k8s.gcr.io/cluster-autoscaler:v1.3.9"
+ ],
+ "sizeBytes": 216696035
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:f37a2c84614bdd02475ccb020182caec562cde97fdfd9dae58de66ff89614bc5",
+ "k8s.gcr.io/cluster-autoscaler:v1.3.8"
+ ],
+ "sizeBytes": 216693526
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:b0777becbfc7a56e66b079d2767fdc173121a29165523bbbe309bcb2c0a226aa",
+ "k8s.gcr.io/cluster-autoscaler:v1.2.5"
+ ],
+ "sizeBytes": 212991966
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:36a369ca4643542d501bce0addf8b903f2141ae9e2608662b77a3d24f01d7780",
+ "k8s.gcr.io/cluster-autoscaler:v1.2.2"
+ ],
+ "sizeBytes": 208688449
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/containernetworking/azure-npm@sha256:7b9e7dec6b06a21595f9aa06b319c99b579950619fa869dd85dc637b2235d79f",
+ "mcr.microsoft.com/containernetworking/azure-npm:v1.0.18"
+ ],
+ "sizeBytes": 170727162
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:760232bed2097b5ca742f05b15c94d56ff96ed6b5c93251edc613be045c8d78b",
+ "k8s.gcr.io/cluster-autoscaler:v1.15.0"
+ ],
+ "sizeBytes": 152214996
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:a4e5a8e6d4dc011e6e7a104d6abdfda56274b90357ee9f6e42cc22b70482420b",
+ "k8s.gcr.io/cluster-autoscaler:v1.14.0"
+ ],
+ "sizeBytes": 142102721
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:cbc61e0f6c3ef1c591a0f22ec483826110e2c10acddd5415c0cc2305fd085e69",
+ "k8s.gcr.io/cluster-autoscaler:v1.14.2"
+ ],
+ "sizeBytes": 142099784
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:9dcbd91e79f33c44529de58a0024deb3da23a3a0bc7fd4d028c1255c68f62fb7",
+ "k8s.gcr.io/cluster-autoscaler:v1.13.2"
+ ],
+ "sizeBytes": 136684274
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:e4140dc3ab54e115ab4464331b25022fc5ffb947b568aaf81089efb72506c895",
+ "k8s.gcr.io/cluster-autoscaler:v1.13.4"
+ ],
+ "sizeBytes": 136681463
+ },
+ {
+ "names": [
+ "k8s.gcr.io/cluster-autoscaler@sha256:7ff5a60304b344f2f29c804c7253632bbc818794f6932236a56db107a6a8f5af",
+ "k8s.gcr.io/cluster-autoscaler:v1.13.1"
+ ],
+ "sizeBytes": 136618018
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/containernetworking/networkmonitor@sha256:d875511410502c3e37804e1f313cc2b0a03d7a03d3d5e6adaf8994b753a76f8e",
+ "mcr.microsoft.com/containernetworking/networkmonitor:v0.0.6"
+ ],
+ "sizeBytes": 123663837
+ },
+ {
+ "names": [
+ "mcr.microsoft.com/containernetworking/networkmonitor@sha256:944408a497c451b0e79d2596dc2e9fe5036cdbba7fa831bff024e1c9ed44190d",
+ "mcr.microsoft.com/containernetworking/networkmonitor:v0.0.5"
+ ],
+ "sizeBytes": 122043325
+ },
+ {
+ "names": [
+ "k8s.gcr.io/kubernetes-dashboard-amd64@sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747",
+ "k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1"
+ ],
+ "sizeBytes": 121711221
+ },
+ {
+ "names": [
+ "k8s.gcr.io/kube-addon-manager-amd64@sha256:3da3f17cd4f02fe5696f29a5e6cd4aef7111f20dab9bec54ea35942346cfeb60",
+ "k8s.gcr.io/kube-addon-manager-amd64:v8.8"
+ ],
+ "sizeBytes": 99631084
+ },
+ {
+ "names": [
+ "k8s.gcr.io/kube-addon-manager-amd64@sha256:2fd1daf3d3cf0e94a753f2263b60dbb0d42b107b5cde0c75ee3fc5c830e016e4",
+ "k8s.gcr.io/kube-addon-manager-amd64:v8.9"
+ ],
+ "sizeBytes": 99240637
+ },
+ {
+ "names": [
+ "microsoft/virtual-kubelet@sha256:efc397d741d7e590c892c0ea5dccc9a800656c3adb95da4dae25c1cdd5eb6d9f",
+ "microsoft/virtual-kubelet:latest"
+ ],
+ "sizeBytes": 87436458
+ },
+ {
+ "names": [
+ "k8s.gcr.io/kube-addon-manager-amd64@sha256:672794ee3582521eb8bc4f257d0f70c92893f1989f39a200f9c84bcfe1aea7c9",
+ "k8s.gcr.io/kube-addon-manager-amd64:v9.0"
+ ],
+ "sizeBytes": 83077558
+ },
+ {
+ "names": [
+ "k8s.gcr.io/kube-addon-manager-amd64@sha256:382c220b3531d9f95bf316a16b7282cc2ef929cd8a89a9dd3f5933edafc41a8e",
+ "k8s.gcr.io/kube-addon-manager-amd64:v9.0.1"
+ ],
+ "sizeBytes": 83076194
+ },
+ {
+ "names": [
+ "aksrepos.azurecr.io/prod/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "deis/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2",
+ "deis/kube-svc-redirect:v1.0.2"
+ ],
+ "sizeBytes": 82897218
+ },
+ {
+ "names": [
+ "k8s.gcr.io/kube-addon-manager-amd64@sha256:3519273916ba45cfc9b318448d4629819cb5fbccbb0822cce054dd8c1f68cb60",
+ "k8s.gcr.io/kube-addon-manager-amd64:v8.6"
+ ],
+ "sizeBytes": 78384272
+ }
+ ],
+ "nodeInfo": {
+ "architecture": "amd64",
+ "bootID": "47e7c02b-3741-42be-a2a1-76c76aa8ccde",
+ "containerRuntimeVersion": "docker://3.0.6",
+ "kernelVersion": "4.15.0-1050-azure",
+ "kubeProxyVersion": "v1.11.8",
+ "kubeletVersion": "v1.11.8",
+ "machineID": "a4a4bc2f5a944cd38aba89365df05227",
+ "operatingSystem": "linux",
+ "osImage": "Ubuntu 16.04.6 LTS",
+ "systemUUID": "BB102B43-9922-264C-8C23-22A7DE0F950F"
+ }
+ }
+ }
+ ],
+ "kind": "List",
+ "metadata": {
+ "resourceVersion": "",
+ "selfLink": ""
+ }
+}
diff --git a/test/code/plugin/health/parent_monitor_provider_spec.rb b/test/code/plugin/health/parent_monitor_provider_spec.rb
index a83db50fc..b531629c4 100644
--- a/test/code/plugin/health/parent_monitor_provider_spec.rb
+++ b/test/code/plugin/health/parent_monitor_provider_spec.rb
@@ -97,7 +97,7 @@ def monitor.labels; {HealthMonitorLabels::ROLE => "master"}; end
assert_equal parent_id, "master_node_pool"
end
- it 'raises if conditions are not met' do
+ it 'returns defaultParentMonitorTypeId if conditions are not met' do
#arrange
definition = JSON.parse('{"conditional_monitor_id": {
"conditions": [
@@ -123,6 +123,7 @@ def monitor.labels; {HealthMonitorLabels::ROLE => "master"}; end
"container.azm.ms/cluster-resource-group",
"container.azm.ms/cluster-name"
],
+ "default_parent_monitor_id": "default_parent_monitor_id",
"aggregation_algorithm": "worstOf",
"aggregation_algorithm_params": null
}
@@ -137,8 +138,9 @@ def monitor.monitor_instance_id; "conditional_monitor_instance_id"; end
def monitor.labels; {HealthMonitorLabels::ROLE => "master1"}; end
#act and assert
- assert_raises do
- parent_id = health_model_definition.get_parent_monitor_id(monitor)
- end
+
+ parent_id = health_model_definition.get_parent_monitor_id(monitor)
+ parent_id.must_equal('default_parent_monitor_id')
+
end
end
diff --git a/test/code/plugin/health/pods.json b/test/code/plugin/health/pods.json
new file mode 100644
index 000000000..b7c202a19
--- /dev/null
+++ b/test/code/plugin/health/pods.json
@@ -0,0 +1,5987 @@
+{
+ "apiVersion": "v1",
+ "items": [
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "creationTimestamp": "2019-08-23T17:12:10Z",
+ "generateName": "heapster-9bcbfdcf5-",
+ "labels": {
+ "k8s-app": "heapster",
+ "pod-template-hash": "567698791"
+ },
+ "name": "heapster-9bcbfdcf5-zp9tl",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "heapster-9bcbfdcf5",
+ "uid": "24a0036e-c5c9-11e9-8736-86290fd7dd1f"
+ }
+ ],
+ "resourceVersion": "19048925",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/heapster-9bcbfdcf5-zp9tl",
+ "uid": "24ab7e32-c5c9-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "command": [
+ "/heapster",
+ "--source=kubernetes.summary_api:\"\""
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/heapster-amd64:v1.5.3",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 3,
+ "httpGet": {
+ "path": "/healthz",
+ "port": 8082,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 180,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "heapster",
+ "resources": {
+ "limits": {
+ "cpu": "88m",
+ "memory": "204Mi"
+ },
+ "requests": {
+ "cpu": "88m",
+ "memory": "204Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "heapster-token-7z7c5",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "command": [
+ "/pod_nanny",
+ "--config-dir=/etc/config",
+ "--cpu=80m",
+ "--extra-cpu=0.5m",
+ "--memory=140Mi",
+ "--extra-memory=4Mi",
+ "--threshold=5",
+ "--deployment=heapster",
+ "--container=heapster",
+ "--poll-period=300000",
+ "--estimator=exponential"
+ ],
+ "env": [
+ {
+ "name": "MY_POD_NAME",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "metadata.name"
+ }
+ }
+ },
+ {
+ "name": "MY_POD_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/addon-resizer:1.8.1",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "heapster-nanny",
+ "resources": {
+ "limits": {
+ "cpu": "50m",
+ "memory": "90Mi"
+ },
+ "requests": {
+ "cpu": "50m",
+ "memory": "90Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/etc/config",
+ "name": "heapster-config-volume"
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "heapster-token-7z7c5",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-0",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "heapster",
+ "serviceAccountName": "heapster",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "heapster-config"
+ },
+ "name": "heapster-config-volume"
+ },
+ {
+ "name": "heapster-token-7z7c5",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "heapster-token-7z7c5"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:12:10Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:12:26Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:12:10Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://8ab1ee82d29d0351cb21dbce4db9eb2a270407d2ebe10377be02edd46cb34027",
+ "image": "aksrepos.azurecr.io/mirror/heapster-amd64:v1.5.3",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/heapster-amd64@sha256:fc33c690a3a446de5abc24b048b88050810a58b9e4477fa763a43d7df029301a",
+ "lastState": {},
+ "name": "heapster",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T17:12:21Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://42154ff41fed196c3f4b8a485436537330d16bcef23c743a34cf63202d023453",
+ "image": "aksrepos.azurecr.io/mirror/addon-resizer:1.8.1",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/addon-resizer@sha256:8ac3ffa4232046feb297cefc40734641fa2954c16308f9e0d70ec152f22231ca",
+ "lastState": {},
+ "name": "heapster-nanny",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T17:12:25Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.4",
+ "phase": "Running",
+ "podIP": "10.244.1.33",
+ "qosClass": "Guaranteed",
+ "startTime": "2019-08-23T17:12:10Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "scheduler.alpha.kubernetes.io/critical-pod": "",
+ "seccomp.security.alpha.kubernetes.io/pod": "docker/default"
+ },
+ "creationTimestamp": "2019-07-09T02:38:06Z",
+ "generateName": "kube-dns-autoscaler-7d64798d95-",
+ "labels": {
+ "k8s-app": "kube-dns-autoscaler",
+ "pod-template-hash": "3820354851"
+ },
+ "name": "kube-dns-autoscaler-7d64798d95-f9wcv",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "kube-dns-autoscaler-7d64798d95",
+ "uid": "71655f71-a1f2-11e9-9bc6-127bb0ec03b8"
+ }
+ ],
+ "resourceVersion": "15144041",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-dns-autoscaler-7d64798d95-f9wcv",
+ "uid": "94e52ab1-a1f2-11e9-8b08-d602e29755d5"
+ },
+ "spec": {
+ "containers": [
+ {
+ "command": [
+ "/cluster-proportional-autoscaler",
+ "--namespace=kube-system",
+ "--configmap=kube-dns-autoscaler",
+ "--target=deployment/kube-dns-v20",
+ "--default-params={\"ladder\":{\"coresToReplicas\":[[1,2],[512,3],[1024,4],[2048,5]],\"nodesToReplicas\":[[1,2],[8,3],[16,4],[32,5]]}}",
+ "--logtostderr=true",
+ "--v=2"
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/cluster-proportional-autoscaler-amd64:1.1.2-r2",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "autoscaler",
+ "resources": {
+ "requests": {
+ "cpu": "20m",
+ "memory": "10Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-dns-autoscaler-token-zkxt8",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "Default",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-2",
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-dns-autoscaler",
+ "serviceAccountName": "kube-dns-autoscaler",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "name": "kube-dns-autoscaler-token-zkxt8",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-dns-autoscaler-token-zkxt8"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:07Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:44Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:06Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://43f5fde3ce0f375a40c08de56087fc3b53f6269b239a3e6383d2082779504b96",
+ "image": "aksrepos.azurecr.io/mirror/cluster-proportional-autoscaler-amd64:1.1.2-r2",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/cluster-proportional-autoscaler-amd64@sha256:ccd2b031b116750091443930a8e6d0f785cfde38f137969e472b2ac850aeddfb",
+ "lastState": {},
+ "name": "autoscaler",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:38:40Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.7",
+ "phase": "Running",
+ "podIP": "10.244.12.118",
+ "qosClass": "Burstable",
+ "startTime": "2019-07-09T02:38:07Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "prometheus.io/port": "10055",
+ "prometheus.io/scrape": "true"
+ },
+ "creationTimestamp": "2019-07-09T02:38:06Z",
+ "generateName": "kube-dns-v20-55cb5d96f7-",
+ "labels": {
+ "k8s-app": "kube-dns",
+ "kubernetes.io/cluster-service": "true",
+ "pod-template-hash": "1176185293",
+ "version": "v20"
+ },
+ "name": "kube-dns-v20-55cb5d96f7-lmrpl",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "kube-dns-v20-55cb5d96f7",
+ "uid": "71892fd6-a1f2-11e9-9bc6-127bb0ec03b8"
+ }
+ ],
+ "resourceVersion": "15144030",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-dns-v20-55cb5d96f7-lmrpl",
+ "uid": "952488f3-a1f2-11e9-8b08-d602e29755d5"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "podAntiAffinity": {
+ "preferredDuringSchedulingIgnoredDuringExecution": [
+ {
+ "podAffinityTerm": {
+ "labelSelector": {
+ "matchExpressions": [
+ {
+ "key": "k8s-app",
+ "operator": "In",
+ "values": [
+ "kube-dns"
+ ]
+ }
+ ]
+ },
+ "topologyKey": "kubernetes.io/hostname"
+ },
+ "weight": 100
+ }
+ ]
+ }
+ },
+ "containers": [
+ {
+ "args": [
+ "--kubecfg-file=/config/kubeconfig",
+ "--config-dir=/kube-dns-config",
+ "--domain=cluster.local.",
+ "--dns-port=10053",
+ "--v=2"
+ ],
+ "env": [
+ {
+ "name": "PROMETHEUS_PORT",
+ "value": "10055"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 5,
+ "httpGet": {
+ "path": "/healthcheck/kubedns",
+ "port": 10054,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 60,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "kubedns",
+ "ports": [
+ {
+ "containerPort": 10053,
+ "name": "dns-local",
+ "protocol": "UDP"
+ },
+ {
+ "containerPort": 10053,
+ "name": "dns-tcp-local",
+ "protocol": "TCP"
+ },
+ {
+ "containerPort": 10055,
+ "name": "metrics",
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "failureThreshold": 3,
+ "httpGet": {
+ "path": "/readiness",
+ "port": 8081,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 30,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "resources": {
+ "limits": {
+ "memory": "170Mi"
+ },
+ "requests": {
+ "cpu": "100m",
+ "memory": "70Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/kube-dns-config",
+ "name": "kube-dns-config"
+ },
+ {
+ "mountPath": "/config",
+ "name": "kubedns-kubecfg",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-dns-token-ghgtl",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "args": [
+ "-v=2",
+ "-logtostderr",
+ "-configDir=/kube-dns-config",
+ "-restartDnsmasq=true",
+ "--",
+ "-k",
+ "--cache-size=1000",
+ "--no-negcache",
+ "--no-resolv",
+ "--server=127.0.0.1#10053",
+ "--server=/cluster.local/127.0.0.1#10053",
+ "--server=/in-addr.arpa/127.0.0.1#10053",
+ "--server=/ip6.arpa/127.0.0.1#10053",
+ "--log-facility=-"
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "dnsmasq",
+ "ports": [
+ {
+ "containerPort": 53,
+ "name": "dns",
+ "protocol": "UDP"
+ },
+ {
+ "containerPort": 53,
+ "name": "dns-tcp",
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {},
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/kube-dns-config",
+ "name": "kube-dns-config"
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-dns-token-ghgtl",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "args": [
+ "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1 \u003e/dev/null || exit 1; done",
+ "--url=/healthz-dnsmasq",
+ "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1:10053 \u003e/dev/null || exit 1; done",
+ "--url=/healthz-kubedns",
+ "--port=8080",
+ "--quiet"
+ ],
+ "env": [
+ {
+ "name": "PROBE_DOMAINS",
+ "value": "bing.com kubernetes.default.svc.cluster.local"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 5,
+ "httpGet": {
+ "path": "/healthz-dnsmasq",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 60,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "healthz",
+ "ports": [
+ {
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "50Mi"
+ },
+ "requests": {
+ "cpu": "10m",
+ "memory": "50Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-dns-token-ghgtl",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "args": [
+ "--v=2",
+ "--logtostderr",
+ "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV",
+ "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV"
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 3,
+ "httpGet": {
+ "path": "/metrics",
+ "port": 10054,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 60,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "sidecar",
+ "ports": [
+ {
+ "containerPort": 10054,
+ "name": "metrics",
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "requests": {
+ "cpu": "10m",
+ "memory": "20Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-dns-token-ghgtl",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "Default",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-1",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-dns",
+ "serviceAccountName": "kube-dns",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "kube-dns",
+ "optional": true
+ },
+ "name": "kube-dns-config"
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "kubedns-kubecfg"
+ },
+ "name": "kubedns-kubecfg"
+ },
+ {
+ "name": "kube-dns-token-ghgtl",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-dns-token-ghgtl"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:09Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:50Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:06Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://8aa7d794d423f29469d8a35cc295bfaf2434a26756d7063fb19e06ce838aa5d9",
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64@sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8",
+ "lastState": {},
+ "name": "dnsmasq",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:38:23Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://7ee72258ca97555017c3096c3c125935b22e1735dafd494bec7f5480a408314a",
+ "image": "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/exechealthz-amd64@sha256:34722333f0cd0b891b61c9e0efa31913f22157e341a3aabb79967305d4e78260",
+ "lastState": {},
+ "name": "healthz",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:38:25Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://bf6c7e823d08306e6ba13353ae89319080990a5d302b1d7370e76acd34c34a52",
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64@sha256:618a82fa66cf0c75e4753369a6999032372be7308866fc9afb381789b1e5ad52",
+ "lastState": {},
+ "name": "kubedns",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:38:19Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://2e4faf4da65a23316dc7065e3de27bf1ebd9ac2a8f07b9053de5ab63ab4c2d7e",
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64@sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4",
+ "lastState": {},
+ "name": "sidecar",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:38:28Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.5",
+ "phase": "Running",
+ "podIP": "10.244.0.192",
+ "qosClass": "Burstable",
+ "startTime": "2019-07-09T02:38:09Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "prometheus.io/port": "10055",
+ "prometheus.io/scrape": "true"
+ },
+ "creationTimestamp": "2019-07-09T02:38:06Z",
+ "generateName": "kube-dns-v20-55cb5d96f7-",
+ "labels": {
+ "k8s-app": "kube-dns",
+ "kubernetes.io/cluster-service": "true",
+ "pod-template-hash": "1176185293",
+ "version": "v20"
+ },
+ "name": "kube-dns-v20-55cb5d96f7-pl7sh",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "kube-dns-v20-55cb5d96f7",
+ "uid": "71892fd6-a1f2-11e9-9bc6-127bb0ec03b8"
+ }
+ ],
+ "resourceVersion": "15144050",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-dns-v20-55cb5d96f7-pl7sh",
+ "uid": "95046bc6-a1f2-11e9-8b08-d602e29755d5"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "podAntiAffinity": {
+ "preferredDuringSchedulingIgnoredDuringExecution": [
+ {
+ "podAffinityTerm": {
+ "labelSelector": {
+ "matchExpressions": [
+ {
+ "key": "k8s-app",
+ "operator": "In",
+ "values": [
+ "kube-dns"
+ ]
+ }
+ ]
+ },
+ "topologyKey": "kubernetes.io/hostname"
+ },
+ "weight": 100
+ }
+ ]
+ }
+ },
+ "containers": [
+ {
+ "args": [
+ "--kubecfg-file=/config/kubeconfig",
+ "--config-dir=/kube-dns-config",
+ "--domain=cluster.local.",
+ "--dns-port=10053",
+ "--v=2"
+ ],
+ "env": [
+ {
+ "name": "PROMETHEUS_PORT",
+ "value": "10055"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 5,
+ "httpGet": {
+ "path": "/healthcheck/kubedns",
+ "port": 10054,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 60,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "kubedns",
+ "ports": [
+ {
+ "containerPort": 10053,
+ "name": "dns-local",
+ "protocol": "UDP"
+ },
+ {
+ "containerPort": 10053,
+ "name": "dns-tcp-local",
+ "protocol": "TCP"
+ },
+ {
+ "containerPort": 10055,
+ "name": "metrics",
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "failureThreshold": 3,
+ "httpGet": {
+ "path": "/readiness",
+ "port": 8081,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 30,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "resources": {
+ "limits": {
+ "memory": "170Mi"
+ },
+ "requests": {
+ "cpu": "100m",
+ "memory": "70Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/kube-dns-config",
+ "name": "kube-dns-config"
+ },
+ {
+ "mountPath": "/config",
+ "name": "kubedns-kubecfg",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-dns-token-ghgtl",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "args": [
+ "-v=2",
+ "-logtostderr",
+ "-configDir=/kube-dns-config",
+ "-restartDnsmasq=true",
+ "--",
+ "-k",
+ "--cache-size=1000",
+ "--no-negcache",
+ "--no-resolv",
+ "--server=127.0.0.1#10053",
+ "--server=/cluster.local/127.0.0.1#10053",
+ "--server=/in-addr.arpa/127.0.0.1#10053",
+ "--server=/ip6.arpa/127.0.0.1#10053",
+ "--log-facility=-"
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "dnsmasq",
+ "ports": [
+ {
+ "containerPort": 53,
+ "name": "dns",
+ "protocol": "UDP"
+ },
+ {
+ "containerPort": 53,
+ "name": "dns-tcp",
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {},
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/kube-dns-config",
+ "name": "kube-dns-config"
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-dns-token-ghgtl",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "args": [
+ "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1 \u003e/dev/null || exit 1; done",
+ "--url=/healthz-dnsmasq",
+ "--cmd=for d in $PROBE_DOMAINS; do nslookup $d 127.0.0.1:10053 \u003e/dev/null || exit 1; done",
+ "--url=/healthz-kubedns",
+ "--port=8080",
+ "--quiet"
+ ],
+ "env": [
+ {
+ "name": "PROBE_DOMAINS",
+ "value": "bing.com kubernetes.default.svc.cluster.local"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 5,
+ "httpGet": {
+ "path": "/healthz-dnsmasq",
+ "port": 8080,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 60,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "healthz",
+ "ports": [
+ {
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "50Mi"
+ },
+ "requests": {
+ "cpu": "10m",
+ "memory": "50Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-dns-token-ghgtl",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "args": [
+ "--v=2",
+ "--logtostderr",
+ "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV",
+ "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV"
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 3,
+ "httpGet": {
+ "path": "/metrics",
+ "port": 10054,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 60,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 5
+ },
+ "name": "sidecar",
+ "ports": [
+ {
+ "containerPort": 10054,
+ "name": "metrics",
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "requests": {
+ "cpu": "10m",
+ "memory": "20Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-dns-token-ghgtl",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "Default",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-2",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-dns",
+ "serviceAccountName": "kube-dns",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "kube-dns",
+ "optional": true
+ },
+ "name": "kube-dns-config"
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "kubedns-kubecfg"
+ },
+ "name": "kubedns-kubecfg"
+ },
+ {
+ "name": "kube-dns-token-ghgtl",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-dns-token-ghgtl"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:10Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:39:14Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:06Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://c16dce3b5c1f06c6fbfdf52edb98f9916740c0f652dc72b2fe0f9f0cc5c4c4de",
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64:1.14.10",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-dnsmasq-nanny-amd64@sha256:bbb2a290a568125b3b996028958eb773f33b5b87a6b37bf38a28f8b62dddb3c8",
+ "lastState": {},
+ "name": "dnsmasq",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:38:51Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://410ceb88fcbc2c3cdf19ffc5ce88adb0ba933bbc3cf446a90e669a978a7d933c",
+ "image": "aksrepos.azurecr.io/mirror/exechealthz-amd64:1.2",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/exechealthz-amd64@sha256:34722333f0cd0b891b61c9e0efa31913f22157e341a3aabb79967305d4e78260",
+ "lastState": {},
+ "name": "healthz",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:38:58Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://694f575606b51234a98b3e22d2afd04f3fa11c30b6090a901e64922eeb9fba95",
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64:1.14.13",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-kube-dns-amd64@sha256:618a82fa66cf0c75e4753369a6999032372be7308866fc9afb381789b1e5ad52",
+ "lastState": {},
+ "name": "kubedns",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:38:39Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://d7865fb7465b2f9cd218cdf6694018aee55260966f2bf51e6b628a86c6b9041f",
+ "image": "aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64:1.14.10",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/k8s-dns-sidecar-amd64@sha256:4f1ab957f87b94a5ec1edc26fae50da2175461f00afecf68940c4aa079bd08a4",
+ "lastState": {},
+ "name": "sidecar",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:39:04Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.7",
+ "phase": "Running",
+ "podIP": "10.244.12.117",
+ "qosClass": "Burstable",
+ "startTime": "2019-07-09T02:38:10Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "aks.microsoft.com/release-time": "seconds:1566580134 nanos:758740921 ",
+ "remediator.aks.microsoft.com/kube-proxy-restart": "24"
+ },
+ "creationTimestamp": "2019-08-23T17:13:13Z",
+ "generateName": "kube-proxy-",
+ "labels": {
+ "component": "kube-proxy",
+ "controller-revision-hash": "3559350992",
+ "pod-template-generation": "141",
+ "tier": "node"
+ },
+ "name": "kube-proxy-ct2tl",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "kube-proxy",
+ "uid": "45640bf6-44e5-11e9-9920-423525a6b683"
+ }
+ ],
+ "resourceVersion": "19049034",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-proxy-ct2tl",
+ "uid": "49e373c8-c5c9-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "command": [
+ "/hyperkube",
+ "proxy",
+ "--kubeconfig=/var/lib/kubelet/kubeconfig",
+ "--cluster-cidr=10.244.0.0/16",
+ "--feature-gates=ExperimentalCriticalPodAnnotation=true",
+ "--v=3"
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "kube-proxy",
+ "resources": {
+ "requests": {
+ "cpu": "100m"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/kubelet",
+ "name": "kubeconfig",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/kubernetes/certs",
+ "name": "certificates",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-proxy-token-f5vbg",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "hostNetwork": true,
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-0",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-proxy",
+ "serviceAccountName": "kube-proxy",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/network-unavailable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "hostPath": {
+ "path": "/var/lib/kubelet",
+ "type": ""
+ },
+ "name": "kubeconfig"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes/certs",
+ "type": ""
+ },
+ "name": "certificates"
+ },
+ {
+ "name": "kube-proxy-token-f5vbg",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-proxy-token-f5vbg"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:13:13Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:13:23Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:13:13Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://ef115b31792ece39d1526075f9f3763f8cbf526814624795a05786d83367427e",
+ "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "lastState": {},
+ "name": "kube-proxy",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T17:13:22Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.4",
+ "phase": "Running",
+ "podIP": "10.240.0.4",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-23T17:13:13Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "aks.microsoft.com/release-time": "seconds:1566580134 nanos:758740921 ",
+ "remediator.aks.microsoft.com/kube-proxy-restart": "24"
+ },
+ "creationTimestamp": "2019-08-23T17:10:52Z",
+ "generateName": "kube-proxy-",
+ "labels": {
+ "component": "kube-proxy",
+ "controller-revision-hash": "3559350992",
+ "pod-template-generation": "141",
+ "tier": "node"
+ },
+ "name": "kube-proxy-d59xd",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "kube-proxy",
+ "uid": "45640bf6-44e5-11e9-9920-423525a6b683"
+ }
+ ],
+ "resourceVersion": "19048698",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-proxy-d59xd",
+ "uid": "f65e6a62-c5c8-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "command": [
+ "/hyperkube",
+ "proxy",
+ "--kubeconfig=/var/lib/kubelet/kubeconfig",
+ "--cluster-cidr=10.244.0.0/16",
+ "--feature-gates=ExperimentalCriticalPodAnnotation=true",
+ "--v=3"
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "kube-proxy",
+ "resources": {
+ "requests": {
+ "cpu": "100m"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/kubelet",
+ "name": "kubeconfig",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/kubernetes/certs",
+ "name": "certificates",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-proxy-token-f5vbg",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "hostNetwork": true,
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-1",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-proxy",
+ "serviceAccountName": "kube-proxy",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/network-unavailable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "hostPath": {
+ "path": "/var/lib/kubelet",
+ "type": ""
+ },
+ "name": "kubeconfig"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes/certs",
+ "type": ""
+ },
+ "name": "certificates"
+ },
+ {
+ "name": "kube-proxy-token-f5vbg",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-proxy-token-f5vbg"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:10:52Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:11:05Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:10:52Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://c4e9d0e372116b9cab048f7bb381e93b423dac2285da75f66664a473fcc043b3",
+ "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "lastState": {},
+ "name": "kube-proxy",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T17:11:04Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.5",
+ "phase": "Running",
+ "podIP": "10.240.0.5",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-23T17:10:52Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "aks.microsoft.com/release-time": "seconds:1566580134 nanos:758740921 ",
+ "remediator.aks.microsoft.com/kube-proxy-restart": "24"
+ },
+ "creationTimestamp": "2019-08-23T17:12:23Z",
+ "generateName": "kube-proxy-",
+ "labels": {
+ "component": "kube-proxy",
+ "controller-revision-hash": "3559350992",
+ "pod-template-generation": "141",
+ "tier": "node"
+ },
+ "name": "kube-proxy-kpm8j",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "kube-proxy",
+ "uid": "45640bf6-44e5-11e9-9920-423525a6b683"
+ }
+ ],
+ "resourceVersion": "19048942",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-proxy-kpm8j",
+ "uid": "2c3de48d-c5c9-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "command": [
+ "/hyperkube",
+ "proxy",
+ "--kubeconfig=/var/lib/kubelet/kubeconfig",
+ "--cluster-cidr=10.244.0.0/16",
+ "--feature-gates=ExperimentalCriticalPodAnnotation=true",
+ "--v=3"
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "kube-proxy",
+ "resources": {
+ "requests": {
+ "cpu": "100m"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/kubelet",
+ "name": "kubeconfig",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/kubernetes/certs",
+ "name": "certificates",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-proxy-token-f5vbg",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "hostNetwork": true,
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-2",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-proxy",
+ "serviceAccountName": "kube-proxy",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/network-unavailable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "hostPath": {
+ "path": "/var/lib/kubelet",
+ "type": ""
+ },
+ "name": "kubeconfig"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes/certs",
+ "type": ""
+ },
+ "name": "certificates"
+ },
+ {
+ "name": "kube-proxy-token-f5vbg",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-proxy-token-f5vbg"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:12:24Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:12:34Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:12:24Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://51067a965113e6d285a676e0d1e212ffbb60046aab6c4702f5554617415b2031",
+ "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "lastState": {},
+ "name": "kube-proxy",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T17:12:33Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.7",
+ "phase": "Running",
+ "podIP": "10.240.0.7",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-23T17:12:24Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "aks.microsoft.com/release-time": "seconds:1566580134 nanos:758740921 ",
+ "remediator.aks.microsoft.com/kube-proxy-restart": "24"
+ },
+ "creationTimestamp": "2019-08-23T17:11:38Z",
+ "generateName": "kube-proxy-",
+ "labels": {
+ "component": "kube-proxy",
+ "controller-revision-hash": "3559350992",
+ "pod-template-generation": "141",
+ "tier": "node"
+ },
+ "name": "kube-proxy-skzg4",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "kube-proxy",
+ "uid": "45640bf6-44e5-11e9-9920-423525a6b683"
+ }
+ ],
+ "resourceVersion": "19048774",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-proxy-skzg4",
+ "uid": "114f7246-c5c9-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "command": [
+ "/hyperkube",
+ "proxy",
+ "--kubeconfig=/var/lib/kubelet/kubeconfig",
+ "--cluster-cidr=10.244.0.0/16",
+ "--feature-gates=ExperimentalCriticalPodAnnotation=true",
+ "--v=3"
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "kube-proxy",
+ "resources": {
+ "requests": {
+ "cpu": "100m"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/kubelet",
+ "name": "kubeconfig",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/kubernetes/certs",
+ "name": "certificates",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-proxy-token-f5vbg",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "hostNetwork": true,
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-3",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-proxy",
+ "serviceAccountName": "kube-proxy",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/network-unavailable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "hostPath": {
+ "path": "/var/lib/kubelet",
+ "type": ""
+ },
+ "name": "kubeconfig"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes/certs",
+ "type": ""
+ },
+ "name": "certificates"
+ },
+ {
+ "name": "kube-proxy-token-f5vbg",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-proxy-token-f5vbg"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:11:38Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:11:42Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T17:11:38Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://a3172e9191547b0ea3eb7db629cd4bba2240f5c9d0186ea37be49d9877034541",
+ "image": "aksrepos.azurecr.io/mirror/hyperkube-amd64:v1.11.8",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/hyperkube-amd64@sha256:1447d5b491fcee503c9f8fb712e1593dc3772c7e661251f54c297477cc716913",
+ "lastState": {},
+ "name": "kube-proxy",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T17:11:41Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.6",
+ "phase": "Running",
+ "podIP": "10.240.0.6",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-23T17:11:38Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "creationTimestamp": "2019-07-15T20:54:26Z",
+ "generateName": "kube-svc-redirect-",
+ "labels": {
+ "component": "kube-svc-redirect",
+ "controller-revision-hash": "1216437240",
+ "pod-template-generation": "9",
+ "tier": "node"
+ },
+ "name": "kube-svc-redirect-czm8d",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "kube-svc-redirect",
+ "uid": "45a5fc62-44e5-11e9-9920-423525a6b683"
+ }
+ ],
+ "resourceVersion": "15831523",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-svc-redirect-czm8d",
+ "uid": "bb3d3ef2-a742-11e9-a38a-22d1c75c4357"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "KUBERNETES_SVC_IP",
+ "value": "10.0.0.1"
+ },
+ {
+ "name": "KUBE_SVC_REDIRECTOR_PROXY_IP",
+ "value": "127.0.0.1:14612"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "redirector",
+ "resources": {
+ "requests": {
+ "cpu": "5m",
+ "memory": "2Mi"
+ }
+ },
+ "securityContext": {
+ "capabilities": {
+ "add": [
+ "NET_ADMIN"
+ ]
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-svc-redirector-token-ngjg2",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "azureproxy",
+ "ports": [
+ {
+ "containerPort": 14612,
+ "hostPort": 14612,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "requests": {
+ "cpu": "5m",
+ "memory": "32Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/etc/nginx/nginx.conf",
+ "name": "azureproxy-nginx",
+ "readOnly": true,
+ "subPath": "nginx.conf"
+ },
+ {
+ "mountPath": "/etc/nginx/conf.d",
+ "name": "azureproxy-configs",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/nginx/certs",
+ "name": "azureproxy-certs",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-svc-redirector-token-ngjg2",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "hostNetwork": true,
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-0",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-svc-redirector",
+ "serviceAccountName": "kube-svc-redirector",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/network-unavailable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "azureproxy-nginx"
+ },
+ "name": "azureproxy-nginx"
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "azureproxy-config"
+ },
+ "name": "azureproxy-configs"
+ },
+ {
+ "name": "azureproxy-certs",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "azureproxy-certs"
+ }
+ },
+ {
+ "name": "kube-svc-redirector-token-ngjg2",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-svc-redirector-token-ngjg2"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-15T20:54:26Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-15T20:55:03Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-15T20:54:26Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://942d4ddc66e488245fa77cf331a38de7df760d5d5d96b344f5bfbc84adbab861",
+ "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/nginx@sha256:91d22184f3f9b1be658c2cc2c12d324de7ff12c8b9c9a597905457b4d93b069d",
+ "lastState": {},
+ "name": "azureproxy",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-15T20:55:02Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://71d6f73215c0994fa2f7b340732d5e4453a86ece31dcf5278fb2abc32e3e4de2",
+ "image": "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "lastState": {},
+ "name": "redirector",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-15T20:54:36Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.4",
+ "phase": "Running",
+ "podIP": "10.240.0.4",
+ "qosClass": "Burstable",
+ "startTime": "2019-07-15T20:54:26Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "creationTimestamp": "2019-08-07T18:57:56Z",
+ "generateName": "kube-svc-redirect-",
+ "labels": {
+ "component": "kube-svc-redirect",
+ "controller-revision-hash": "1216437240",
+ "pod-template-generation": "9",
+ "tier": "node"
+ },
+ "name": "kube-svc-redirect-mqk98",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "kube-svc-redirect",
+ "uid": "45a5fc62-44e5-11e9-9920-423525a6b683"
+ }
+ ],
+ "resourceVersion": "16965477",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-svc-redirect-mqk98",
+ "uid": "44a61692-b945-11e9-a1b6-127094e7fd94"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "KUBERNETES_SVC_IP",
+ "value": "10.0.0.1"
+ },
+ {
+ "name": "KUBE_SVC_REDIRECTOR_PROXY_IP",
+ "value": "127.0.0.1:14612"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "redirector",
+ "resources": {
+ "requests": {
+ "cpu": "5m",
+ "memory": "2Mi"
+ }
+ },
+ "securityContext": {
+ "capabilities": {
+ "add": [
+ "NET_ADMIN"
+ ]
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-svc-redirector-token-ngjg2",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "azureproxy",
+ "ports": [
+ {
+ "containerPort": 14612,
+ "hostPort": 14612,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "requests": {
+ "cpu": "5m",
+ "memory": "32Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/etc/nginx/nginx.conf",
+ "name": "azureproxy-nginx",
+ "readOnly": true,
+ "subPath": "nginx.conf"
+ },
+ {
+ "mountPath": "/etc/nginx/conf.d",
+ "name": "azureproxy-configs",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/nginx/certs",
+ "name": "azureproxy-certs",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-svc-redirector-token-ngjg2",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "hostNetwork": true,
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-3",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-svc-redirector",
+ "serviceAccountName": "kube-svc-redirector",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/network-unavailable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "azureproxy-nginx"
+ },
+ "name": "azureproxy-nginx"
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "azureproxy-config"
+ },
+ "name": "azureproxy-configs"
+ },
+ {
+ "name": "azureproxy-certs",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "azureproxy-certs"
+ }
+ },
+ {
+ "name": "kube-svc-redirector-token-ngjg2",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-svc-redirector-token-ngjg2"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-07T18:57:58Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-07T18:58:09Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-07T18:57:58Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://5f47547dc8e4fceb8e2a6e01cee5612b49e2dc2d5682b6a58f648d8223b3a6b0",
+ "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/nginx@sha256:91d22184f3f9b1be658c2cc2c12d324de7ff12c8b9c9a597905457b4d93b069d",
+ "lastState": {},
+ "name": "azureproxy",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-07T18:58:09Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://5da4e17288399f8e2d4998e5c06159d0d2d39690e89195c5381ab7e3c91aaf99",
+ "image": "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/prod/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "lastState": {},
+ "name": "redirector",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-07T18:58:08Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.6",
+ "phase": "Running",
+ "podIP": "10.240.0.6",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-07T18:57:58Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "creationTimestamp": "2019-07-15T20:55:38Z",
+ "generateName": "kube-svc-redirect-",
+ "labels": {
+ "component": "kube-svc-redirect",
+ "controller-revision-hash": "1216437240",
+ "pod-template-generation": "9",
+ "tier": "node"
+ },
+ "name": "kube-svc-redirect-qf4tl",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "kube-svc-redirect",
+ "uid": "45a5fc62-44e5-11e9-9920-423525a6b683"
+ }
+ ],
+ "resourceVersion": "15144014",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-svc-redirect-qf4tl",
+ "uid": "e690309f-a742-11e9-a38a-22d1c75c4357"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "KUBERNETES_SVC_IP",
+ "value": "10.0.0.1"
+ },
+ {
+ "name": "KUBE_SVC_REDIRECTOR_PROXY_IP",
+ "value": "127.0.0.1:14612"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "redirector",
+ "resources": {
+ "requests": {
+ "cpu": "5m",
+ "memory": "2Mi"
+ }
+ },
+ "securityContext": {
+ "capabilities": {
+ "add": [
+ "NET_ADMIN"
+ ]
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-svc-redirector-token-ngjg2",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "azureproxy",
+ "ports": [
+ {
+ "containerPort": 14612,
+ "hostPort": 14612,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "requests": {
+ "cpu": "5m",
+ "memory": "32Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/etc/nginx/nginx.conf",
+ "name": "azureproxy-nginx",
+ "readOnly": true,
+ "subPath": "nginx.conf"
+ },
+ {
+ "mountPath": "/etc/nginx/conf.d",
+ "name": "azureproxy-configs",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/nginx/certs",
+ "name": "azureproxy-certs",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-svc-redirector-token-ngjg2",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "hostNetwork": true,
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-1",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-svc-redirector",
+ "serviceAccountName": "kube-svc-redirector",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/network-unavailable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "azureproxy-nginx"
+ },
+ "name": "azureproxy-nginx"
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "azureproxy-config"
+ },
+ "name": "azureproxy-configs"
+ },
+ {
+ "name": "azureproxy-certs",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "azureproxy-certs"
+ }
+ },
+ {
+ "name": "kube-svc-redirector-token-ngjg2",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-svc-redirector-token-ngjg2"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-15T20:55:38Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-15T20:55:47Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-15T20:55:38Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://a0fa774ceba9ae78cf75ffb96a0d8f3ca4d48e5d9d17218957b07e8b1e7e2862",
+ "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/nginx@sha256:91d22184f3f9b1be658c2cc2c12d324de7ff12c8b9c9a597905457b4d93b069d",
+ "lastState": {},
+ "name": "azureproxy",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-15T20:55:46Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://7f281954c57ff6529aaeea2e79dc45a8abeabd4b360c2bbea5c0830ddac4f093",
+ "image": "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "lastState": {},
+ "name": "redirector",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-15T20:55:44Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.5",
+ "phase": "Running",
+ "podIP": "10.240.0.5",
+ "qosClass": "Burstable",
+ "startTime": "2019-07-15T20:55:38Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "creationTimestamp": "2019-07-15T20:56:33Z",
+ "generateName": "kube-svc-redirect-",
+ "labels": {
+ "component": "kube-svc-redirect",
+ "controller-revision-hash": "1216437240",
+ "pod-template-generation": "9",
+ "tier": "node"
+ },
+ "name": "kube-svc-redirect-rtw2t",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "kube-svc-redirect",
+ "uid": "45a5fc62-44e5-11e9-9920-423525a6b683"
+ }
+ ],
+ "resourceVersion": "15144039",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kube-svc-redirect-rtw2t",
+ "uid": "06fef5f6-a743-11e9-a38a-22d1c75c4357"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "KUBERNETES_SVC_IP",
+ "value": "10.0.0.1"
+ },
+ {
+ "name": "KUBE_SVC_REDIRECTOR_PROXY_IP",
+ "value": "127.0.0.1:14612"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/prod/kube-svc-redirect:v1.0.2",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "redirector",
+ "resources": {
+ "requests": {
+ "cpu": "5m",
+ "memory": "2Mi"
+ }
+ },
+ "securityContext": {
+ "capabilities": {
+ "add": [
+ "NET_ADMIN"
+ ]
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-svc-redirector-token-ngjg2",
+ "readOnly": true
+ }
+ ]
+ },
+ {
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "azureproxy",
+ "ports": [
+ {
+ "containerPort": 14612,
+ "hostPort": 14612,
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "requests": {
+ "cpu": "5m",
+ "memory": "32Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/etc/nginx/nginx.conf",
+ "name": "azureproxy-nginx",
+ "readOnly": true,
+ "subPath": "nginx.conf"
+ },
+ {
+ "mountPath": "/etc/nginx/conf.d",
+ "name": "azureproxy-configs",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/nginx/certs",
+ "name": "azureproxy-certs",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kube-svc-redirector-token-ngjg2",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "hostNetwork": true,
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-2",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kube-svc-redirector",
+ "serviceAccountName": "kube-svc-redirector",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/network-unavailable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "azureproxy-nginx"
+ },
+ "name": "azureproxy-nginx"
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "azureproxy-config"
+ },
+ "name": "azureproxy-configs"
+ },
+ {
+ "name": "azureproxy-certs",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "azureproxy-certs"
+ }
+ },
+ {
+ "name": "kube-svc-redirector-token-ngjg2",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kube-svc-redirector-token-ngjg2"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-15T20:56:33Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-15T20:56:49Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-15T20:56:33Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://aaea93b1e6a0c55e9ac0c002ffa6fdfb99e98b2f1a38c474cc2b9b65e947b6d9",
+ "image": "aksrepos.azurecr.io/mirror/nginx:1.13.12-alpine",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/nginx@sha256:91d22184f3f9b1be658c2cc2c12d324de7ff12c8b9c9a597905457b4d93b069d",
+ "lastState": {},
+ "name": "azureproxy",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-15T20:56:48Z"
+ }
+ }
+ },
+ {
+ "containerID": "docker://c03c8b9e99095205945e15bef5f60c0501c8a0a77186afc1fcc8eb0804274e78",
+ "image": "aksrepos.azurecr.io/mirror/kube-svc-redirect:v1.0.2",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/kube-svc-redirect@sha256:a448687b78d24dae388bd3d54591c179c891fa078404752bc9c9dfdaecdc02ef",
+ "lastState": {},
+ "name": "redirector",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-15T20:56:43Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.7",
+ "phase": "Running",
+ "podIP": "10.240.0.7",
+ "qosClass": "Burstable",
+ "startTime": "2019-07-15T20:56:33Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "creationTimestamp": "2019-07-09T02:38:07Z",
+ "generateName": "kubernetes-dashboard-6dcdfcd68b-",
+ "labels": {
+ "k8s-app": "kubernetes-dashboard",
+ "kubernetes.io/cluster-service": "true",
+ "pod-template-hash": "2878978246"
+ },
+ "name": "kubernetes-dashboard-6dcdfcd68b-nfqbf",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "kubernetes-dashboard-6dcdfcd68b",
+ "uid": "71ff2821-a1f2-11e9-9bc6-127bb0ec03b8"
+ }
+ ],
+ "resourceVersion": "15831517",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/kubernetes-dashboard-6dcdfcd68b-nfqbf",
+ "uid": "9583b2ab-a1f2-11e9-8b08-d602e29755d5"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64:v1.10.1",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "failureThreshold": 3,
+ "httpGet": {
+ "path": "/",
+ "port": 9090,
+ "scheme": "HTTP"
+ },
+ "initialDelaySeconds": 30,
+ "periodSeconds": 10,
+ "successThreshold": 1,
+ "timeoutSeconds": 30
+ },
+ "name": "main",
+ "ports": [
+ {
+ "containerPort": 9090,
+ "name": "http",
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "cpu": "100m",
+ "memory": "500Mi"
+ },
+ "requests": {
+ "cpu": "100m",
+ "memory": "50Mi"
+ }
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "kubernetes-dashboard-token-w4t8s",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-0",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "kubernetes-dashboard",
+ "serviceAccountName": "kubernetes-dashboard",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "name": "kubernetes-dashboard-token-w4t8s",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "kubernetes-dashboard-token-w4t8s"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:14Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:39:08Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:07Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://2b042ce7bdf3d03cb606317b19ee797cbf7b99c65076a67001064bccb313b3cb",
+ "image": "aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64:v1.10.1",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/kubernetes-dashboard-amd64@sha256:0ae6b69432e78069c5ce2bcde0fe409c5c4d6f0f4d9cd50a17974fea38898747",
+ "lastState": {},
+ "name": "main",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:39:07Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.4",
+ "phase": "Running",
+ "podIP": "10.244.1.197",
+ "qosClass": "Burstable",
+ "startTime": "2019-07-09T02:38:14Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "creationTimestamp": "2019-07-09T02:38:06Z",
+ "generateName": "metrics-server-76cd9fb66-",
+ "labels": {
+ "k8s-app": "metrics-server",
+ "pod-template-hash": "327859622"
+ },
+ "name": "metrics-server-76cd9fb66-h2q55",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "metrics-server-76cd9fb66",
+ "uid": "71c837df-a1f2-11e9-9bc6-127bb0ec03b8"
+ }
+ ],
+ "resourceVersion": "15144037",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/metrics-server-76cd9fb66-h2q55",
+ "uid": "9543dbb7-a1f2-11e9-8b08-d602e29755d5"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "command": [
+ "/metrics-server",
+ "--source=kubernetes.summary_api:''"
+ ],
+ "env": [
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1",
+ "imagePullPolicy": "IfNotPresent",
+ "name": "metrics-server",
+ "resources": {},
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "metrics-server-token-qtdgm",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-1",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "metrics-server",
+ "serviceAccountName": "metrics-server",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "name": "metrics-server-token-qtdgm",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "metrics-server-token-qtdgm"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:09Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:20Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-07-09T02:38:07Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://f60ef82657e5ccdfb611a4f3381848dff77a01bddf95c431e4b7a2bf6f4b8087",
+ "image": "aksrepos.azurecr.io/mirror/metrics-server-amd64:v0.2.1",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/mirror/metrics-server-amd64@sha256:220c0ed3451cb95e4b2f72dd5dc8d9d39d9f529722e5b29d8286373ce27b117e",
+ "lastState": {},
+ "name": "metrics-server",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-07-09T02:38:18Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.5",
+ "phase": "Running",
+ "podIP": "10.244.0.193",
+ "qosClass": "BestEffort",
+ "startTime": "2019-07-09T02:38:09Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "agentVersion": "1.10.0.1",
+ "dockerProviderVersion": "6.0.0-0",
+ "schema-versions": "v1"
+ },
+ "creationTimestamp": "2019-08-23T19:53:57Z",
+ "generateName": "omsagent-",
+ "labels": {
+ "controller-revision-hash": "868116844",
+ "dsName": "omsagent-ds",
+ "pod-template-generation": "9"
+ },
+ "name": "omsagent-25pks",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "omsagent",
+ "uid": "e2f8c552-c2d2-11e9-8736-86290fd7dd1f"
+ }
+ ],
+ "resourceVersion": "19063729",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/omsagent-25pks",
+ "uid": "be78d7f6-c5df-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "AKS_RESOURCE_ID",
+ "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test"
+ },
+ {
+ "name": "AKS_REGION",
+ "value": "eastus"
+ },
+ {
+ "name": "CONTROLLER_TYPE",
+ "value": "DaemonSet"
+ },
+ {
+ "name": "NODE_IP",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "status.hostIP"
+ }
+ }
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/livenessprobe.sh"
+ ]
+ },
+ "failureThreshold": 3,
+ "initialDelaySeconds": 60,
+ "periodSeconds": 60,
+ "successThreshold": 1,
+ "timeoutSeconds": 1
+ },
+ "name": "omsagent",
+ "ports": [
+ {
+ "containerPort": 25225,
+ "protocol": "TCP"
+ },
+ {
+ "containerPort": 25224,
+ "protocol": "UDP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "cpu": "150m",
+ "memory": "600Mi"
+ },
+ "requests": {
+ "cpu": "75m",
+ "memory": "225Mi"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/hostfs",
+ "name": "host-root",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/host",
+ "name": "docker-sock"
+ },
+ {
+ "mountPath": "/var/log",
+ "name": "host-log"
+ },
+ {
+ "mountPath": "/var/lib/docker/containers",
+ "name": "containerlog-path"
+ },
+ {
+ "mountPath": "/etc/kubernetes/host",
+ "name": "azure-json-path"
+ },
+ {
+ "mountPath": "/etc/omsagent-secret",
+ "name": "omsagent-secret"
+ },
+ {
+ "mountPath": "/etc/config/settings",
+ "name": "settings-vol-config",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "omsagent-token-fjmqb",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "nodeName": "aks-nodepool1-19574989-2",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 0,
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "omsagent",
+ "serviceAccountName": "omsagent",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "effect": "NoSchedule",
+ "key": "node-role.kubernetes.io/master",
+ "operator": "Equal",
+ "value": "true"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "hostPath": {
+ "path": "/",
+ "type": ""
+ },
+ "name": "host-root"
+ },
+ {
+ "hostPath": {
+ "path": "/var/run",
+ "type": ""
+ },
+ "name": "docker-sock"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/hostname",
+ "type": ""
+ },
+ "name": "container-hostname"
+ },
+ {
+ "hostPath": {
+ "path": "/var/log",
+ "type": ""
+ },
+ "name": "host-log"
+ },
+ {
+ "hostPath": {
+ "path": "/var/lib/docker/containers",
+ "type": ""
+ },
+ "name": "containerlog-path"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes",
+ "type": ""
+ },
+ "name": "azure-json-path"
+ },
+ {
+ "name": "omsagent-secret",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-secret"
+ }
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "container-azm-ms-agentconfig",
+ "optional": true
+ },
+ "name": "settings-vol-config"
+ },
+ {
+ "name": "omsagent-token-fjmqb",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-token-fjmqb"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:53:57Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:54:44Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:53:57Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://acd5cedc2c5874122047c47bb1398f35a7c0297292fc4a0e01345123c233d19a",
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imageID": "docker-pullable://mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf",
+ "lastState": {},
+ "name": "omsagent",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T19:54:43Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.7",
+ "phase": "Running",
+ "podIP": "10.244.12.169",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-23T19:53:57Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "agentVersion": "1.10.0.1",
+ "dockerProviderVersion": "6.0.0-0",
+ "schema-versions": "v1"
+ },
+ "creationTimestamp": "2019-08-23T19:51:35Z",
+ "generateName": "omsagent-",
+ "labels": {
+ "controller-revision-hash": "868116844",
+ "dsName": "omsagent-ds",
+ "pod-template-generation": "9"
+ },
+ "name": "omsagent-4tncr",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "omsagent",
+ "uid": "e2f8c552-c2d2-11e9-8736-86290fd7dd1f"
+ }
+ ],
+ "resourceVersion": "19063468",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/omsagent-4tncr",
+ "uid": "69e68b21-c5df-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "AKS_RESOURCE_ID",
+ "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test"
+ },
+ {
+ "name": "AKS_REGION",
+ "value": "eastus"
+ },
+ {
+ "name": "CONTROLLER_TYPE",
+ "value": "DaemonSet"
+ },
+ {
+ "name": "NODE_IP",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "status.hostIP"
+ }
+ }
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/livenessprobe.sh"
+ ]
+ },
+ "failureThreshold": 3,
+ "initialDelaySeconds": 60,
+ "periodSeconds": 60,
+ "successThreshold": 1,
+ "timeoutSeconds": 1
+ },
+ "name": "omsagent",
+ "ports": [
+ {
+ "containerPort": 25225,
+ "protocol": "TCP"
+ },
+ {
+ "containerPort": 25224,
+ "protocol": "UDP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "cpu": "150m",
+ "memory": "600Mi"
+ },
+ "requests": {
+ "cpu": "75m",
+ "memory": "225Mi"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/hostfs",
+ "name": "host-root",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/host",
+ "name": "docker-sock"
+ },
+ {
+ "mountPath": "/var/log",
+ "name": "host-log"
+ },
+ {
+ "mountPath": "/var/lib/docker/containers",
+ "name": "containerlog-path"
+ },
+ {
+ "mountPath": "/etc/kubernetes/host",
+ "name": "azure-json-path"
+ },
+ {
+ "mountPath": "/etc/omsagent-secret",
+ "name": "omsagent-secret"
+ },
+ {
+ "mountPath": "/etc/config/settings",
+ "name": "settings-vol-config",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "omsagent-token-fjmqb",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "nodeName": "aks-nodepool1-19574989-1",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 0,
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "omsagent",
+ "serviceAccountName": "omsagent",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "effect": "NoSchedule",
+ "key": "node-role.kubernetes.io/master",
+ "operator": "Equal",
+ "value": "true"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "hostPath": {
+ "path": "/",
+ "type": ""
+ },
+ "name": "host-root"
+ },
+ {
+ "hostPath": {
+ "path": "/var/run",
+ "type": ""
+ },
+ "name": "docker-sock"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/hostname",
+ "type": ""
+ },
+ "name": "container-hostname"
+ },
+ {
+ "hostPath": {
+ "path": "/var/log",
+ "type": ""
+ },
+ "name": "host-log"
+ },
+ {
+ "hostPath": {
+ "path": "/var/lib/docker/containers",
+ "type": ""
+ },
+ "name": "containerlog-path"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes",
+ "type": ""
+ },
+ "name": "azure-json-path"
+ },
+ {
+ "name": "omsagent-secret",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-secret"
+ }
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "container-azm-ms-agentconfig",
+ "optional": true
+ },
+ "name": "settings-vol-config"
+ },
+ {
+ "name": "omsagent-token-fjmqb",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-token-fjmqb"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:51:35Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:52:28Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:51:35Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://7803b80452aa34460c848d9c1ca65d6bd925665cf78faaa8dbc122482f93c744",
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imageID": "docker-pullable://mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf",
+ "lastState": {},
+ "name": "omsagent",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T19:52:27Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.5",
+ "phase": "Running",
+ "podIP": "10.244.0.251",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-23T19:51:35Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "agentVersion": "1.10.0.1",
+ "dockerProviderVersion": "6.0.0-0",
+ "schema-versions": "v1"
+ },
+ "creationTimestamp": "2019-08-23T19:53:36Z",
+ "generateName": "omsagent-",
+ "labels": {
+ "controller-revision-hash": "868116844",
+ "dsName": "omsagent-ds",
+ "pod-template-generation": "9"
+ },
+ "name": "omsagent-h44fk",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "omsagent",
+ "uid": "e2f8c552-c2d2-11e9-8736-86290fd7dd1f"
+ }
+ ],
+ "resourceVersion": "19063631",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/omsagent-h44fk",
+ "uid": "b1e04e1c-c5df-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "AKS_RESOURCE_ID",
+ "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test"
+ },
+ {
+ "name": "AKS_REGION",
+ "value": "eastus"
+ },
+ {
+ "name": "CONTROLLER_TYPE",
+ "value": "DaemonSet"
+ },
+ {
+ "name": "NODE_IP",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "status.hostIP"
+ }
+ }
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/livenessprobe.sh"
+ ]
+ },
+ "failureThreshold": 3,
+ "initialDelaySeconds": 60,
+ "periodSeconds": 60,
+ "successThreshold": 1,
+ "timeoutSeconds": 1
+ },
+ "name": "omsagent",
+ "ports": [
+ {
+ "containerPort": 25225,
+ "protocol": "TCP"
+ },
+ {
+ "containerPort": 25224,
+ "protocol": "UDP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "cpu": "150m",
+ "memory": "600Mi"
+ },
+ "requests": {
+ "cpu": "75m",
+ "memory": "225Mi"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/hostfs",
+ "name": "host-root",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/host",
+ "name": "docker-sock"
+ },
+ {
+ "mountPath": "/var/log",
+ "name": "host-log"
+ },
+ {
+ "mountPath": "/var/lib/docker/containers",
+ "name": "containerlog-path"
+ },
+ {
+ "mountPath": "/etc/kubernetes/host",
+ "name": "azure-json-path"
+ },
+ {
+ "mountPath": "/etc/omsagent-secret",
+ "name": "omsagent-secret"
+ },
+ {
+ "mountPath": "/etc/config/settings",
+ "name": "settings-vol-config",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "omsagent-token-fjmqb",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "nodeName": "aks-nodepool1-19574989-0",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 0,
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "omsagent",
+ "serviceAccountName": "omsagent",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "effect": "NoSchedule",
+ "key": "node-role.kubernetes.io/master",
+ "operator": "Equal",
+ "value": "true"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "hostPath": {
+ "path": "/",
+ "type": ""
+ },
+ "name": "host-root"
+ },
+ {
+ "hostPath": {
+ "path": "/var/run",
+ "type": ""
+ },
+ "name": "docker-sock"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/hostname",
+ "type": ""
+ },
+ "name": "container-hostname"
+ },
+ {
+ "hostPath": {
+ "path": "/var/log",
+ "type": ""
+ },
+ "name": "host-log"
+ },
+ {
+ "hostPath": {
+ "path": "/var/lib/docker/containers",
+ "type": ""
+ },
+ "name": "containerlog-path"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes",
+ "type": ""
+ },
+ "name": "azure-json-path"
+ },
+ {
+ "name": "omsagent-secret",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-secret"
+ }
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "container-azm-ms-agentconfig",
+ "optional": true
+ },
+ "name": "settings-vol-config"
+ },
+ {
+ "name": "omsagent-token-fjmqb",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-token-fjmqb"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:53:36Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:53:51Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:53:36Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://4b71a82e472a8e5d0bc4ef9b9b5d2ccf25741b31269480a77e29424ebe87757c",
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imageID": "docker-pullable://mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf",
+ "lastState": {},
+ "name": "omsagent",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T19:53:49Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.4",
+ "phase": "Running",
+ "podIP": "10.244.1.35",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-23T19:53:36Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "agentVersion": "1.10.0.1",
+ "dockerProviderVersion": "6.0.0-0",
+ "schema-versions": "v1"
+ },
+ "creationTimestamp": "2019-08-23T19:51:28Z",
+ "generateName": "omsagent-rs-5bb85d7468-",
+ "labels": {
+ "pod-template-hash": "1664183024",
+ "rsName": "omsagent-rs"
+ },
+ "name": "omsagent-rs-5bb85d7468-dnxpw",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "omsagent-rs-5bb85d7468",
+ "uid": "659ec974-c5df-11e9-8736-86290fd7dd1f"
+ }
+ ],
+ "resourceVersion": "19063495",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/omsagent-rs-5bb85d7468-dnxpw",
+ "uid": "65a6f978-c5df-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "AKS_RESOURCE_ID",
+ "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test"
+ },
+ {
+ "name": "AKS_REGION",
+ "value": "eastus"
+ },
+ {
+ "name": "CONTROLLER_TYPE",
+ "value": "ReplicaSet"
+ },
+ {
+ "name": "NODE_IP",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "status.hostIP"
+ }
+ }
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/livenessprobe.sh"
+ ]
+ },
+ "failureThreshold": 3,
+ "initialDelaySeconds": 60,
+ "periodSeconds": 60,
+ "successThreshold": 1,
+ "timeoutSeconds": 1
+ },
+ "name": "omsagent",
+ "ports": [
+ {
+ "containerPort": 25225,
+ "protocol": "TCP"
+ },
+ {
+ "containerPort": 25224,
+ "protocol": "UDP"
+ },
+ {
+ "containerPort": 25227,
+ "name": "in-rs-tcp",
+ "protocol": "TCP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "cpu": "150m",
+ "memory": "500Mi"
+ },
+ "requests": {
+ "cpu": "110m",
+ "memory": "250Mi"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/var/run/host",
+ "name": "docker-sock"
+ },
+ {
+ "mountPath": "/var/log",
+ "name": "host-log"
+ },
+ {
+ "mountPath": "/var/lib/docker/containers",
+ "name": "containerlog-path"
+ },
+ {
+ "mountPath": "/etc/kubernetes/host",
+ "name": "azure-json-path"
+ },
+ {
+ "mountPath": "/etc/omsagent-secret",
+ "name": "omsagent-secret",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/config",
+ "name": "omsagent-rs-config"
+ },
+ {
+ "mountPath": "/etc/config/settings",
+ "name": "settings-vol-config",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "omsagent-token-fjmqb",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "nodeName": "aks-nodepool1-19574989-0",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux",
+ "kubernetes.io/role": "agent"
+ },
+ "priority": 0,
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "omsagent",
+ "serviceAccountName": "omsagent",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "hostPath": {
+ "path": "/var/run",
+ "type": ""
+ },
+ "name": "docker-sock"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/hostname",
+ "type": ""
+ },
+ "name": "container-hostname"
+ },
+ {
+ "hostPath": {
+ "path": "/var/log",
+ "type": ""
+ },
+ "name": "host-log"
+ },
+ {
+ "hostPath": {
+ "path": "/var/lib/docker/containers",
+ "type": ""
+ },
+ "name": "containerlog-path"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes",
+ "type": ""
+ },
+ "name": "azure-json-path"
+ },
+ {
+ "name": "omsagent-secret",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-secret"
+ }
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "omsagent-rs-config"
+ },
+ "name": "omsagent-rs-config"
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "container-azm-ms-agentconfig",
+ "optional": true
+ },
+ "name": "settings-vol-config"
+ },
+ {
+ "name": "omsagent-token-fjmqb",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-token-fjmqb"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:51:28Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:52:37Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:51:28Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://7e080036bc213a7dadd95b1d8439e06a1b62822219642a83cab059dc4292b0e5",
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imageID": "docker-pullable://mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf",
+ "lastState": {},
+ "name": "omsagent",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T19:52:37Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.4",
+ "phase": "Running",
+ "podIP": "10.244.1.34",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-23T19:51:28Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "annotations": {
+ "agentVersion": "1.10.0.1",
+ "dockerProviderVersion": "6.0.0-0",
+ "schema-versions": "v1"
+ },
+ "creationTimestamp": "2019-08-23T19:52:35Z",
+ "generateName": "omsagent-",
+ "labels": {
+ "controller-revision-hash": "868116844",
+ "dsName": "omsagent-ds",
+ "pod-template-generation": "9"
+ },
+ "name": "omsagent-sb6xx",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "DaemonSet",
+ "name": "omsagent",
+ "uid": "e2f8c552-c2d2-11e9-8736-86290fd7dd1f"
+ }
+ ],
+ "resourceVersion": "19063577",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/omsagent-sb6xx",
+ "uid": "8dbd5e8b-c5df-11e9-8736-86290fd7dd1f"
+ },
+ "spec": {
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "AKS_RESOURCE_ID",
+ "value": "/subscriptions/72c8e8ca-dc16-47dc-b65c-6b5875eb600a/resourcegroups/dilipr-health-test/providers/Microsoft.ContainerService/managedClusters/dilipr-health-test"
+ },
+ {
+ "name": "AKS_REGION",
+ "value": "eastus"
+ },
+ {
+ "name": "CONTROLLER_TYPE",
+ "value": "DaemonSet"
+ },
+ {
+ "name": "NODE_IP",
+ "valueFrom": {
+ "fieldRef": {
+ "apiVersion": "v1",
+ "fieldPath": "status.hostIP"
+ }
+ }
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/livenessprobe.sh"
+ ]
+ },
+ "failureThreshold": 3,
+ "initialDelaySeconds": 60,
+ "periodSeconds": 60,
+ "successThreshold": 1,
+ "timeoutSeconds": 1
+ },
+ "name": "omsagent",
+ "ports": [
+ {
+ "containerPort": 25225,
+ "protocol": "TCP"
+ },
+ {
+ "containerPort": 25224,
+ "protocol": "UDP"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "cpu": "150m",
+ "memory": "600Mi"
+ },
+ "requests": {
+ "cpu": "75m",
+ "memory": "225Mi"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/hostfs",
+ "name": "host-root",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/host",
+ "name": "docker-sock"
+ },
+ {
+ "mountPath": "/var/log",
+ "name": "host-log"
+ },
+ {
+ "mountPath": "/var/lib/docker/containers",
+ "name": "containerlog-path"
+ },
+ {
+ "mountPath": "/etc/kubernetes/host",
+ "name": "azure-json-path"
+ },
+ {
+ "mountPath": "/etc/omsagent-secret",
+ "name": "omsagent-secret"
+ },
+ {
+ "mountPath": "/etc/config/settings",
+ "name": "settings-vol-config",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "omsagent-token-fjmqb",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "ClusterFirst",
+ "nodeName": "aks-nodepool1-19574989-3",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 0,
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "omsagent",
+ "serviceAccountName": "omsagent",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "effect": "NoSchedule",
+ "key": "node-role.kubernetes.io/master",
+ "operator": "Equal",
+ "value": "true"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/disk-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/memory-pressure",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoSchedule",
+ "key": "node.kubernetes.io/unschedulable",
+ "operator": "Exists"
+ }
+ ],
+ "volumes": [
+ {
+ "hostPath": {
+ "path": "/",
+ "type": ""
+ },
+ "name": "host-root"
+ },
+ {
+ "hostPath": {
+ "path": "/var/run",
+ "type": ""
+ },
+ "name": "docker-sock"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/hostname",
+ "type": ""
+ },
+ "name": "container-hostname"
+ },
+ {
+ "hostPath": {
+ "path": "/var/log",
+ "type": ""
+ },
+ "name": "host-log"
+ },
+ {
+ "hostPath": {
+ "path": "/var/lib/docker/containers",
+ "type": ""
+ },
+ "name": "containerlog-path"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes",
+ "type": ""
+ },
+ "name": "azure-json-path"
+ },
+ {
+ "name": "omsagent-secret",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-secret"
+ }
+ },
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "container-azm-ms-agentconfig",
+ "optional": true
+ },
+ "name": "settings-vol-config"
+ },
+ {
+ "name": "omsagent-token-fjmqb",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "omsagent-token-fjmqb"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:52:35Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:53:25Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-23T19:52:35Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://f4f0cb19e5da394a4332847953c18d9321319f2ef422533b890ab844cb997879",
+ "image": "mcr.microsoft.com/azuremonitor/containerinsights/ciprod:ciprod08222019",
+ "imageID": "docker-pullable://mcr.microsoft.com/azuremonitor/containerinsights/ciprod@sha256:69b420bdb4081293c37e2d0f8ad2e4054bd516f5c08c7512d6b695660a36eccf",
+ "lastState": {},
+ "name": "omsagent",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-23T19:53:24Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.6",
+ "phase": "Running",
+ "podIP": "10.244.2.62",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-23T19:52:35Z"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Pod",
+ "metadata": {
+ "creationTimestamp": "2019-08-12T20:28:08Z",
+ "generateName": "tunnelfront-65c8cfb7cc-",
+ "labels": {
+ "component": "tunnel",
+ "pod-template-hash": "2174796377"
+ },
+ "name": "tunnelfront-65c8cfb7cc-z8srb",
+ "namespace": "kube-system",
+ "ownerReferences": [
+ {
+ "apiVersion": "apps/v1",
+ "blockOwnerDeletion": true,
+ "controller": true,
+ "kind": "ReplicaSet",
+ "name": "tunnelfront-65c8cfb7cc",
+ "uid": "7013afa3-a742-11e9-a08d-96dd47774ee5"
+ }
+ ],
+ "resourceVersion": "17628809",
+ "selfLink": "/api/v1/namespaces/kube-system/pods/tunnelfront-65c8cfb7cc-z8srb",
+ "uid": "b2a0e1b3-bd3f-11e9-b2a7-d61658c73830"
+ },
+ "spec": {
+ "affinity": {
+ "nodeAffinity": {
+ "requiredDuringSchedulingIgnoredDuringExecution": {
+ "nodeSelectorTerms": [
+ {
+ "matchExpressions": [
+ {
+ "key": "kubernetes.azure.com/cluster",
+ "operator": "Exists"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ },
+ "containers": [
+ {
+ "env": [
+ {
+ "name": "OVERRIDE_TUNNEL_SERVER_NAME",
+ "value": "t_dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "TUNNEL_CLUSTERUSER_NAME",
+ "value": "28957308"
+ },
+ {
+ "name": "TUNNELGATEWAY_SERVER_NAME",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-0b16acad.tun.eastus.azmk8s.io"
+ },
+ {
+ "name": "TUNNELGATEWAY_SSH_PORT",
+ "value": "22"
+ },
+ {
+ "name": "TUNNELGATEWAY_TLS_PORT",
+ "value": "443"
+ },
+ {
+ "name": "KUBE_CONFIG",
+ "value": "/etc/kubernetes/kubeconfig/kubeconfig"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP_ADDR",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ },
+ {
+ "name": "KUBERNETES_PORT",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_PORT_443_TCP",
+ "value": "tcp://dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io:443"
+ },
+ {
+ "name": "KUBERNETES_SERVICE_HOST",
+ "value": "dilipr-hea-dilipr-health-te-72c8e8-d3ccfd8f.hcp.eastus.azmk8s.io"
+ }
+ ],
+ "image": "aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7",
+ "imagePullPolicy": "IfNotPresent",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/lib/tunnel-front/check-tunnel-connection.sh"
+ ]
+ },
+ "failureThreshold": 12,
+ "initialDelaySeconds": 10,
+ "periodSeconds": 60,
+ "successThreshold": 1,
+ "timeoutSeconds": 1
+ },
+ "name": "tunnel-front",
+ "resources": {
+ "requests": {
+ "cpu": "10m",
+ "memory": "64Mi"
+ }
+ },
+ "securityContext": {
+ "privileged": true
+ },
+ "terminationMessagePath": "/dev/termination-log",
+ "terminationMessagePolicy": "File",
+ "volumeMounts": [
+ {
+ "mountPath": "/etc/kubernetes/kubeconfig",
+ "name": "kubeconfig",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/etc/kubernetes/certs",
+ "name": "certificates",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
+ "name": "tunnelfront-token-njgvg",
+ "readOnly": true
+ }
+ ]
+ }
+ ],
+ "dnsPolicy": "Default",
+ "imagePullSecrets": [
+ {
+ "name": "emptyacrsecret"
+ }
+ ],
+ "nodeName": "aks-nodepool1-19574989-3",
+ "nodeSelector": {
+ "beta.kubernetes.io/os": "linux"
+ },
+ "priority": 2000001000,
+ "priorityClassName": "system-node-critical",
+ "restartPolicy": "Always",
+ "schedulerName": "default-scheduler",
+ "securityContext": {},
+ "serviceAccount": "tunnelfront",
+ "serviceAccountName": "tunnelfront",
+ "terminationGracePeriodSeconds": 30,
+ "tolerations": [
+ {
+ "key": "CriticalAddonsOnly",
+ "operator": "Exists"
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/not-ready",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ },
+ {
+ "effect": "NoExecute",
+ "key": "node.kubernetes.io/unreachable",
+ "operator": "Exists",
+ "tolerationSeconds": 300
+ }
+ ],
+ "volumes": [
+ {
+ "configMap": {
+ "defaultMode": 420,
+ "name": "tunnelfront-kubecfg",
+ "optional": true
+ },
+ "name": "kubeconfig"
+ },
+ {
+ "hostPath": {
+ "path": "/etc/kubernetes/certs",
+ "type": ""
+ },
+ "name": "certificates"
+ },
+ {
+ "name": "tunnelfront-token-njgvg",
+ "secret": {
+ "defaultMode": 420,
+ "secretName": "tunnelfront-token-njgvg"
+ }
+ }
+ ]
+ },
+ "status": {
+ "conditions": [
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-12T20:28:08Z",
+ "status": "True",
+ "type": "Initialized"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-12T20:28:13Z",
+ "status": "True",
+ "type": "Ready"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": null,
+ "status": "True",
+ "type": "ContainersReady"
+ },
+ {
+ "lastProbeTime": null,
+ "lastTransitionTime": "2019-08-12T20:28:08Z",
+ "status": "True",
+ "type": "PodScheduled"
+ }
+ ],
+ "containerStatuses": [
+ {
+ "containerID": "docker://ac3b7482b15ba1f825e7a9ceef11defaccdc2682b9a20bb7c98bc307a8a34cf6",
+ "image": "aksrepos.azurecr.io/prod/hcp-tunnel-front:v1.9.2-v4.0.7",
+ "imageID": "docker-pullable://aksrepos.azurecr.io/prod/hcp-tunnel-front@sha256:68878ee3ea1781b322ea3952c3370e31dd89be8bb0864e2bf27bdba6dc904c41",
+ "lastState": {},
+ "name": "tunnel-front",
+ "ready": true,
+ "restartCount": 0,
+ "state": {
+ "running": {
+ "startedAt": "2019-08-12T20:28:13Z"
+ }
+ }
+ }
+ ],
+ "hostIP": "10.240.0.6",
+ "phase": "Running",
+ "podIP": "10.244.2.10",
+ "qosClass": "Burstable",
+ "startTime": "2019-08-12T20:28:08Z"
+ }
+ }
+ ],
+ "kind": "List",
+ "metadata": {
+ "resourceVersion": "",
+ "selfLink": ""
+ }
+}