diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_available/rule.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_available/rule.yml new file mode 100644 index 000000000000..bbe60fc0aa5e --- /dev/null +++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_available/rule.yml @@ -0,0 +1,72 @@ +documentation_complete: true + +prodtype: ocp4 + +title: 'Ensure Eviction threshold Settings Are Set - evictionHard: imagefs.available' + +description: |- +
Two types of garbage collection are performed on an OpenShift Container Platform node:
+ ++ Container garbage collection can be performed using eviction thresholds. + Image garbage collection relies on disk usage as reported by cAdvisor on the + node to decide which images to remove from the node. +
+ ++ The OpenShift administrator can configure how OpenShift Container Platform + performs garbage collection by creating a kubeletConfig object for each + Machine Config Pool using any combination of the following: +
+ ++ To configure, follow the directions in + {{{ weblink(link="https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring", + text="the documentation") }}} +
+ ++ This rule pertains to the imagefs.available setting of the evictionHard + section. +
+ +rationale: |- + Garbage collection is important to ensure sufficient resource availability + and avoiding degraded performance and availability. In the worst case, the + system might crash or just be unusable for a long period of time. + Based on your system resources and tests, choose an appropriate threshold + value to activate garbage collection. + +severity: medium + +references: + cis: 1.3.1 + +identifiers: + cce@ocp4: CCE-84144-5 + +ocil_clause: 'imagefs.available is not set in evictionHard section' + +ocil: |- + Run the following command on the kubelet node(s): +$ oc debug -q node/$NODE -- jq -r '.evictionHard."imagefs.available"' /host/etc/kubernetes/kubelet.conf+ and make sure it outputs +
{{{ xccdf_value("var_kubelet_evictionhard_imagefs_available") }}}
+
+template:
+ name: yamlfile_value
+ vars:
+ filepath: /etc/kubernetes/kubelet.conf
+ yamlpath: ".evictionHard['imagefs.available']"
+ xccdf_variable: var_kubelet_evictionhard_imagefs_available
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_available/tests/ocp4/e2e-remediation.sh b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_available/tests/ocp4/e2e-remediation.sh
new file mode 100755
index 000000000000..5de2c6570359
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_available/tests/ocp4/e2e-remediation.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -xe
+
+echo "applying sysctls"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-sysctls-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "applying kubeletConfig"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-config-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+exit 0
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_available/tests/ocp4/e2e.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_available/tests/ocp4/e2e.yml
new file mode 100644
index 000000000000..fd9b313e87b4
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_available/tests/ocp4/e2e.yml
@@ -0,0 +1,3 @@
+---
+default_result: FAIL
+result_after_remediation: PASS
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_inodesfree/rule.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_inodesfree/rule.yml
new file mode 100644
index 000000000000..fbae5e4a215f
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_inodesfree/rule.yml
@@ -0,0 +1,72 @@
+documentation_complete: true
+
+prodtype: ocp4
+
+title: 'Ensure Eviction threshold Settings Are Set - evictionHard: imagefs.inodesFree'
+
+description: |-
+ Two types of garbage collection are performed on an OpenShift Container Platform node:
+ ++ Container garbage collection can be performed using eviction thresholds. + Image garbage collection relies on disk usage as reported by cAdvisor on the + node to decide which images to remove from the node. +
+ ++ The OpenShift administrator can configure how OpenShift Container Platform + performs garbage collection by creating a kubeletConfig object for each + Machine Config Pool using any combination of the following: +
+ ++ To configure, follow the directions in + {{{ weblink(link="https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring", + text="the documentation") }}} +
+ ++ This rule pertains to the imagefs.inodesFree setting of the evictionHard + section. +
+ +rationale: |- + Garbage collection is important to ensure sufficient resource availability + and avoiding degraded performance and availability. In the worst case, the + system might crash or just be unusable for a long period of time. + Based on your system resources and tests, choose an appropriate threshold + value to activate garbage collection. + +severity: medium + +references: + cis: 1.3.1 + +identifiers: + cce@ocp4: CCE-84147-8 + +ocil_clause: 'imagefs.inodesFree is not set in evictionHard section' + +ocil: |- + Run the following command on the kubelet node(s): +$ oc debug -q node/$NODE -- jq -r '.evictionHard."imagefs.inodesFree"' /host/etc/kubernetes/kubelet.conf+ and make sure it outputs +
{{{ xccdf_value("var_kubelet_evictionhard_imagefs_inodesfree") }}}
+
+template:
+ name: yamlfile_value
+ vars:
+ filepath: /etc/kubernetes/kubelet.conf
+ yamlpath: ".evictionHard['imagefs.inodesFree']"
+ xccdf_variable: var_kubelet_evictionhard_imagefs_inodesfree
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_inodesfree/tests/ocp4/e2e-remediation.sh b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_inodesfree/tests/ocp4/e2e-remediation.sh
new file mode 100755
index 000000000000..5de2c6570359
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_inodesfree/tests/ocp4/e2e-remediation.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -xe
+
+echo "applying sysctls"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-sysctls-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "applying kubeletConfig"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-config-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+exit 0
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_inodesfree/tests/ocp4/e2e.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_inodesfree/tests/ocp4/e2e.yml
new file mode 100644
index 000000000000..fd9b313e87b4
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_imagefs_inodesfree/tests/ocp4/e2e.yml
@@ -0,0 +1,3 @@
+---
+default_result: FAIL
+result_after_remediation: PASS
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_memory_available/rule.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_memory_available/rule.yml
new file mode 100644
index 000000000000..307fd56f51b1
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_memory_available/rule.yml
@@ -0,0 +1,72 @@
+documentation_complete: true
+
+prodtype: ocp4
+
+title: 'Ensure Eviction threshold Settings Are Set - evictionHard: memory.available'
+
+description: |-
+ Two types of garbage collection are performed on an OpenShift Container Platform node:
+ ++ Container garbage collection can be performed using eviction thresholds. + Image garbage collection relies on disk usage as reported by cAdvisor on the + node to decide which images to remove from the node. +
+ ++ The OpenShift administrator can configure how OpenShift Container Platform + performs garbage collection by creating a kubeletConfig object for each + Machine Config Pool using any combination of the following: +
+ ++ To configure, follow the directions in + {{{ weblink(link="https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring", + text="the documentation") }}} +
+ ++ This rule pertains to the memory.available setting of the evictionHard + section. +
+ +rationale: |- + Garbage collection is important to ensure sufficient resource availability + and avoiding degraded performance and availability. In the worst case, the + system might crash or just be unusable for a long period of time. + Based on your system resources and tests, choose an appropriate threshold + value to activate garbage collection. + +severity: medium + +references: + cis: 1.3.1 + +identifiers: + cce@ocp4: CCE-84135-3 + +ocil_clause: 'memory.available is not set in evictionHard section' + +ocil: |- + Run the following command on the kubelet node(s): +$ oc debug -q node/$NODE -- jq -r '.evictionHard."memory.available"' /host/etc/kubernetes/kubelet.conf+ and make sure it outputs +
{{{ xccdf_value("var_kubelet_evictionhard_memory_available") }}}
+
+template:
+ name: yamlfile_value
+ vars:
+ filepath: /etc/kubernetes/kubelet.conf
+ yamlpath: ".evictionHard['memory.available']"
+ xccdf_variable: var_kubelet_evictionhard_memory_available
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_memory_available/tests/ocp4/e2e-remediation.sh b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_memory_available/tests/ocp4/e2e-remediation.sh
new file mode 100755
index 000000000000..5de2c6570359
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_memory_available/tests/ocp4/e2e-remediation.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -xe
+
+echo "applying sysctls"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-sysctls-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "applying kubeletConfig"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-config-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+exit 0
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_memory_available/tests/ocp4/e2e.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_memory_available/tests/ocp4/e2e.yml
new file mode 100644
index 000000000000..fd9b313e87b4
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_memory_available/tests/ocp4/e2e.yml
@@ -0,0 +1,3 @@
+---
+default_result: FAIL
+result_after_remediation: PASS
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_available/rule.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_available/rule.yml
new file mode 100644
index 000000000000..0340b5a5250e
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_available/rule.yml
@@ -0,0 +1,72 @@
+documentation_complete: true
+
+prodtype: ocp4
+
+title: 'Ensure Eviction threshold Settings Are Set - evictionHard: nodefs.available'
+
+description: |-
+ Two types of garbage collection are performed on an OpenShift Container Platform node:
+ ++ Container garbage collection can be performed using eviction thresholds. + Image garbage collection relies on disk usage as reported by cAdvisor on the + node to decide which images to remove from the node. +
+ ++ The OpenShift administrator can configure how OpenShift Container Platform + performs garbage collection by creating a kubeletConfig object for each + Machine Config Pool using any combination of the following: +
+ ++ To configure, follow the directions in + {{{ weblink(link="https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring", + text="the documentation") }}} +
+ ++ This rule pertains to the nodefs.available setting of the evictionHard + section. +
+ +rationale: |- + Garbage collection is important to ensure sufficient resource availability + and avoiding degraded performance and availability. In the worst case, the + system might crash or just be unusable for a long period of time. + Based on your system resources and tests, choose an appropriate threshold + value to activate garbage collection. + +severity: medium + +references: + cis: 1.3.1 + +identifiers: + cce@ocp4: CCE-84138-7 + +ocil_clause: 'nodefs.available is not set in evictionHard section' + +ocil: |- + Run the following command on the kubelet node(s): +$ oc debug -q node/$NODE -- jq -r '.evictionHard."nodefs.available"' /host/etc/kubernetes/kubelet.conf+ and make sure it outputs +
{{{ xccdf_value("var_kubelet_evictionhard_nodefs_available") }}}
+
+template:
+ name: yamlfile_value
+ vars:
+ filepath: /etc/kubernetes/kubelet.conf
+ yamlpath: ".evictionHard['nodefs.available']"
+ xccdf_variable: var_kubelet_evictionhard_nodefs_available
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_available/tests/ocp4/e2e-remediation.sh b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_available/tests/ocp4/e2e-remediation.sh
new file mode 100755
index 000000000000..5de2c6570359
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_available/tests/ocp4/e2e-remediation.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -xe
+
+echo "applying sysctls"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-sysctls-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "applying kubeletConfig"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-config-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+exit 0
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_available/tests/ocp4/e2e.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_available/tests/ocp4/e2e.yml
new file mode 100644
index 000000000000..fd9b313e87b4
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_available/tests/ocp4/e2e.yml
@@ -0,0 +1,3 @@
+---
+default_result: FAIL
+result_after_remediation: PASS
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_inodesfree/rule.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_inodesfree/rule.yml
new file mode 100644
index 000000000000..558542283ef2
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_inodesfree/rule.yml
@@ -0,0 +1,72 @@
+documentation_complete: true
+
+prodtype: ocp4
+
+title: 'Ensure Eviction threshold Settings Are Set - evictionHard: nodefs.inodesFree'
+
+description: |-
+ Two types of garbage collection are performed on an OpenShift Container Platform node:
+ ++ Container garbage collection can be performed using eviction thresholds. + Image garbage collection relies on disk usage as reported by cAdvisor on the + node to decide which images to remove from the node. +
+ ++ The OpenShift administrator can configure how OpenShift Container Platform + performs garbage collection by creating a kubeletConfig object for each + Machine Config Pool using any combination of the following: +
+ ++ To configure, follow the directions in + {{{ weblink(link="https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring", + text="the documentation") }}} +
+ ++ This rule pertains to the nodefs.inodesFree setting of the evictionHard + section. +
+ +rationale: |- + Garbage collection is important to ensure sufficient resource availability + and avoiding degraded performance and availability. In the worst case, the + system might crash or just be unusable for a long period of time. + Based on your system resources and tests, choose an appropriate threshold + value to activate garbage collection. + +severity: medium + +references: + cis: 1.3.1 + +identifiers: + cce@ocp4: CCE-84141-1 + +ocil_clause: 'nodefs.inodesFree is not set in evictionHard section' + +ocil: |- + Run the following command on the kubelet node(s): +$ oc debug -q node/$NODE -- jq -r '.evictionHard."nodefs.inodesFree"' /host/etc/kubernetes/kubelet.conf+ and make sure it outputs +
{{{ xccdf_value("var_kubelet_evictionhard_nodefs_inodesfree") }}}
+
+template:
+ name: yamlfile_value
+ vars:
+ filepath: /etc/kubernetes/kubelet.conf
+ yamlpath: ".evictionHard['nodefs.inodesFree']"
+ xccdf_variable: var_kubelet_evictionhard_nodefs_inodesfree
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_inodesfree/tests/ocp4/e2e-remediation.sh b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_inodesfree/tests/ocp4/e2e-remediation.sh
new file mode 100755
index 000000000000..5de2c6570359
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_inodesfree/tests/ocp4/e2e-remediation.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -xe
+
+echo "applying sysctls"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-sysctls-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "applying kubeletConfig"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-config-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+exit 0
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_inodesfree/tests/ocp4/e2e.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_inodesfree/tests/ocp4/e2e.yml
new file mode 100644
index 000000000000..fd9b313e87b4
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_hard_nodefs_inodesfree/tests/ocp4/e2e.yml
@@ -0,0 +1,3 @@
+---
+default_result: FAIL
+result_after_remediation: PASS
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_available/rule.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_available/rule.yml
new file mode 100644
index 000000000000..b8c2e79f2b5a
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_available/rule.yml
@@ -0,0 +1,72 @@
+documentation_complete: true
+
+prodtype: ocp4
+
+title: 'Ensure Eviction threshold Settings Are Set - evictionSoft: imagefs.available'
+
+description: |-
+ Two types of garbage collection are performed on an OpenShift Container Platform node:
+ ++ Container garbage collection can be performed using eviction thresholds. + Image garbage collection relies on disk usage as reported by cAdvisor on the + node to decide which images to remove from the node. +
+ ++ The OpenShift administrator can configure how OpenShift Container Platform + performs garbage collection by creating a kubeletConfig object for each + Machine Config Pool using any combination of the following: +
+ ++ To configure, follow the directions in + {{{ weblink(link="https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring", + text="the documentation") }}} +
+ ++ This rule pertains to the imagefs.available setting of the evictionSoft + section. +
+ +rationale: |- + Garbage collection is important to ensure sufficient resource availability + and avoiding degraded performance and availability. In the worst case, the + system might crash or just be unusable for a long period of time. + Based on your system resources and tests, choose an appropriate threshold + value to activate garbage collection. + +severity: medium + +references: + cis: 1.3.1 + +identifiers: + cce@ocp4: CCE-84127-0 + +ocil_clause: 'imagefs.available is not set in evictionSoft section' + +ocil: |- + Run the following command on the kubelet node(s): +$ oc debug -q node/$NODE -- jq -r '.evictionSoft."imagefs.available"' /host/etc/kubernetes/kubelet.conf+ and make sure it outputs +
{{{ xccdf_value("var_kubelet_evictionsoft_imagefs_available") }}}
+
+template:
+ name: yamlfile_value
+ vars:
+ filepath: /etc/kubernetes/kubelet.conf
+ yamlpath: ".evictionSoft['imagefs.available']"
+ xccdf_variable: var_kubelet_evictionsoft_imagefs_available
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_available/tests/ocp4/e2e-remediation.sh b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_available/tests/ocp4/e2e-remediation.sh
new file mode 100755
index 000000000000..5de2c6570359
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_available/tests/ocp4/e2e-remediation.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -xe
+
+echo "applying sysctls"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-sysctls-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "applying kubeletConfig"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-config-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+exit 0
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_available/tests/ocp4/e2e.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_available/tests/ocp4/e2e.yml
new file mode 100644
index 000000000000..fd9b313e87b4
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_available/tests/ocp4/e2e.yml
@@ -0,0 +1,3 @@
+---
+default_result: FAIL
+result_after_remediation: PASS
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_inodesfree/rule.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_inodesfree/rule.yml
new file mode 100644
index 000000000000..08547a0f27f0
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_inodesfree/rule.yml
@@ -0,0 +1,72 @@
+documentation_complete: true
+
+prodtype: ocp4
+
+title: 'Ensure Eviction threshold Settings Are Set - evictionSoft: imagefs.inodesFree'
+
+description: |-
+ Two types of garbage collection are performed on an OpenShift Container Platform node:
+ ++ Container garbage collection can be performed using eviction thresholds. + Image garbage collection relies on disk usage as reported by cAdvisor on the + node to decide which images to remove from the node. +
+ ++ The OpenShift administrator can configure how OpenShift Container Platform + performs garbage collection by creating a kubeletConfig object for each + Machine Config Pool using any combination of the following: +
+ ++ To configure, follow the directions in + {{{ weblink(link="https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring", + text="the documentation") }}} +
+ ++ This rule pertains to the imagefs.inodesFree setting of the evictionSoft + section. +
+ +rationale: |- + Garbage collection is important to ensure sufficient resource availability + and avoiding degraded performance and availability. In the worst case, the + system might crash or just be unusable for a long period of time. + Based on your system resources and tests, choose an appropriate threshold + value to activate garbage collection. + +severity: medium + +references: + cis: 1.3.1 + +identifiers: + cce@ocp4: CCE-84132-0 + +ocil_clause: 'imagefs.inodesFree is not set in evictionSoft section' + +ocil: |- + Run the following command on the kubelet node(s): +$ oc debug -q node/$NODE -- jq -r '.evictionSoft."imagefs.inodesFree"' /host/etc/kubernetes/kubelet.conf+ and make sure it outputs +
{{{ xccdf_value("var_kubelet_evictionsoft_imagefs_inodesfree") }}}
+
+template:
+ name: yamlfile_value
+ vars:
+ filepath: /etc/kubernetes/kubelet.conf
+ yamlpath: ".evictionSoft['imagefs.inodesFree']"
+ xccdf_variable: var_kubelet_evictionsoft_imagefs_inodesfree
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_inodesfree/tests/ocp4/e2e-remediation.sh b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_inodesfree/tests/ocp4/e2e-remediation.sh
new file mode 100755
index 000000000000..5de2c6570359
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_inodesfree/tests/ocp4/e2e-remediation.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -xe
+
+echo "applying sysctls"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-sysctls-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "applying kubeletConfig"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-config-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+exit 0
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_inodesfree/tests/ocp4/e2e.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_inodesfree/tests/ocp4/e2e.yml
new file mode 100644
index 000000000000..fd9b313e87b4
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_imagefs_inodesfree/tests/ocp4/e2e.yml
@@ -0,0 +1,3 @@
+---
+default_result: FAIL
+result_after_remediation: PASS
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_memory_available/tests/ocp4/e2e-remediation.sh b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_memory_available/tests/ocp4/e2e-remediation.sh
new file mode 100755
index 000000000000..5de2c6570359
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_memory_available/tests/ocp4/e2e-remediation.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -xe
+
+echo "applying sysctls"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-sysctls-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "applying kubeletConfig"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-config-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+exit 0
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_memory_available/tests/ocp4/e2e.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_memory_available/tests/ocp4/e2e.yml
new file mode 100644
index 000000000000..fd9b313e87b4
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_memory_available/tests/ocp4/e2e.yml
@@ -0,0 +1,3 @@
+---
+default_result: FAIL
+result_after_remediation: PASS
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_available/rule.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_available/rule.yml
new file mode 100644
index 000000000000..7ef43af8c198
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_available/rule.yml
@@ -0,0 +1,72 @@
+documentation_complete: true
+
+prodtype: ocp4
+
+title: 'Ensure Eviction threshold Settings Are Set - evictionSoft: nodefs.available'
+
+description: |-
+ Two types of garbage collection are performed on an OpenShift Container Platform node:
+ ++ Container garbage collection can be performed using eviction thresholds. + Image garbage collection relies on disk usage as reported by cAdvisor on the + node to decide which images to remove from the node. +
+ ++ The OpenShift administrator can configure how OpenShift Container Platform + performs garbage collection by creating a kubeletConfig object for each + Machine Config Pool using any combination of the following: +
+ ++ To configure, follow the directions in + {{{ weblink(link="https://docs.openshift.com/container-platform/4.6/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring", + text="the documentation") }}} +
+ ++ This rule pertains to the nodefs.available setting of the evictionSoft + section. +
+ +rationale: |- + Garbage collection is important to ensure sufficient resource availability + and avoiding degraded performance and availability. In the worst case, the + system might crash or just be unusable for a long period of time. + Based on your system resources and tests, choose an appropriate threshold + value to activate garbage collection. + +severity: medium + +references: + cis: 1.3.1 + +identifiers: + cce@ocp4: CCE-84119-7 + +ocil_clause: 'nodefs.available is not set in evictionSoft section' + +ocil: |- + Run the following command on the kubelet node(s): +$ oc debug -q node/$NODE -- jq -r '.evictionSoft."nodefs.available"' /host/etc/kubernetes/kubelet.conf+ and make sure it outputs +
{{{ xccdf_value("var_kubelet_evictionsoft_nodefs_available") }}}
+
+template:
+ name: yamlfile_value
+ vars:
+ filepath: /etc/kubernetes/kubelet.conf
+ yamlpath: ".evictionSoft['nodefs.available']"
+ xccdf_variable: var_kubelet_evictionsoft_nodefs_available
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_available/tests/ocp4/e2e-remediation.sh b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_available/tests/ocp4/e2e-remediation.sh
new file mode 100755
index 000000000000..5de2c6570359
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_available/tests/ocp4/e2e-remediation.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -xe
+
+echo "applying sysctls"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-sysctls-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "applying kubeletConfig"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-config-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+exit 0
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_available/tests/ocp4/e2e.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_available/tests/ocp4/e2e.yml
new file mode 100644
index 000000000000..fd9b313e87b4
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_available/tests/ocp4/e2e.yml
@@ -0,0 +1,3 @@
+---
+default_result: FAIL
+result_after_remediation: PASS
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_inodesfree/rule.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_inodesfree/rule.yml
new file mode 100644
index 000000000000..15154c6764e5
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_inodesfree/rule.yml
@@ -0,0 +1,72 @@
+documentation_complete: true
+
+prodtype: ocp4
+
+title: 'Ensure Eviction threshold Settings Are Set - evictionSoft: nodefs.inodesFree'
+
+description: |-
+ Two types of garbage collection are performed on an OpenShift Container Platform node:
+ ++ Container garbage collection can be performed using eviction thresholds. + Image garbage collection relies on disk usage as reported by cAdvisor on the + node to decide which images to remove from the node. +
+ ++ The OpenShift administrator can configure how OpenShift Container Platform + performs garbage collection by creating a kubeletConfig object for each + Machine Config Pool using any combination of the following: +
+ ++ To configure, follow the directions in + {{{ weblink(link="https://docs.openshift.com/container-platform/4.5/nodes/nodes/nodes-nodes-garbage-collection.html#nodes-nodes-garbage-collection-configuring_nodes-nodes-configuring", + text="the documentation") }}} +
+ ++ This rule pertains to the nodefs.inodesFree setting of the evictionSoft + section. +
+ +rationale: |- + Garbage collection is important to ensure sufficient resource availability + and avoiding degraded performance and availability. In the worst case, the + system might crash or just be unusable for a long period of time. + Based on your system resources and tests, choose an appropriate threshold + value to activate garbage collection. + +severity: medium + +references: + cis: 1.3.1 + +identifiers: + cce@ocp4: CCE-84123-9 + +ocil_clause: 'nodefs.inodesFree is not set in evictionSoft section' + +ocil: |- + Run the following command on the kubelet node(s): +$ oc debug -q node/$NODE -- jq -r '.evictionSoft."nodefs.inodesFree"' /host/etc/kubernetes/kubelet.conf+ and make sure it outputs +
{{{ xccdf_value("var_kubelet_evictionsoft_nodefs_inodesfree") }}}
+
+template:
+ name: yamlfile_value
+ vars:
+ filepath: /etc/kubernetes/kubelet.conf
+ yamlpath: ".evictionSoft['nodefs.inodesFree']"
+ xccdf_variable: var_kubelet_evictionsoft_nodefs_inodesfree
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_inodesfree/tests/ocp4/e2e-remediation.sh b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_inodesfree/tests/ocp4/e2e-remediation.sh
new file mode 100755
index 000000000000..5de2c6570359
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_inodesfree/tests/ocp4/e2e-remediation.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+set -xe
+
+echo "applying sysctls"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-sysctls-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "applying kubeletConfig"
+oc apply --server-side -f ${ROOT_DIR}/ocp-resources/kubelet-config-mc.yaml
+
+sleep 30
+
+echo "waiting for workers to update"
+while true; do
+ status=$(oc get mcp/worker | grep worker | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+echo "waiting for masters to update"
+while true; do
+ status=$(oc get mcp/master | grep master | awk '{ print $3 $4 }')
+ if [ "$status" == "TrueFalse" ]; then
+ break
+ fi
+ sleep 1
+done
+
+exit 0
diff --git a/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_inodesfree/tests/ocp4/e2e.yml b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_inodesfree/tests/ocp4/e2e.yml
new file mode 100644
index 000000000000..fd9b313e87b4
--- /dev/null
+++ b/applications/openshift/kubelet/kubelet_eviction_thresholds_set_soft_nodefs_inodesfree/tests/ocp4/e2e.yml
@@ -0,0 +1,3 @@
+---
+default_result: FAIL
+result_after_remediation: PASS
diff --git a/applications/openshift/kubelet/var_kubelet_evictionhard_imagefs_available.var b/applications/openshift/kubelet/var_kubelet_evictionhard_imagefs_available.var
new file mode 100644
index 000000000000..590347db1e97
--- /dev/null
+++ b/applications/openshift/kubelet/var_kubelet_evictionhard_imagefs_available.var
@@ -0,0 +1,18 @@
+documentation_complete: true
+
+title: 'Configure Kubelet EvictionHard Image FS Available'
+
+description: 'Image FS Available for the EvictionHard threshold to trigger.'
+
+type: string
+
+operator: equals
+
+interactive: false
+
+options:
+ default: "10%"
+ 5pc: "5%"
+ 10pc: "10%"
+ 15pc: "15%"
+ 20pc: "20%"
diff --git a/applications/openshift/kubelet/var_kubelet_evictionhard_imagefs_inodesfree.var b/applications/openshift/kubelet/var_kubelet_evictionhard_imagefs_inodesfree.var
new file mode 100644
index 000000000000..ba63b94a4911
--- /dev/null
+++ b/applications/openshift/kubelet/var_kubelet_evictionhard_imagefs_inodesfree.var
@@ -0,0 +1,18 @@
+documentation_complete: true
+
+title: 'Configure Kubelet EvictionHard Image FS inodes Free'
+
+description: 'Image FS inodes Free for the EvictionHard threshold to trigger.'
+
+type: string
+
+operator: equals
+
+interactive: false
+
+options:
+ default: "5%"
+ 5pc: "5%"
+ 10pc: "10%"
+ 15pc: "15%"
+ 20pc: "20%"
diff --git a/applications/openshift/kubelet/var_kubelet_evictionhard_memory_available.var b/applications/openshift/kubelet/var_kubelet_evictionhard_memory_available.var
new file mode 100644
index 000000000000..30dfe1e27f5c
--- /dev/null
+++ b/applications/openshift/kubelet/var_kubelet_evictionhard_memory_available.var
@@ -0,0 +1,14 @@
+documentation_complete: true
+
+title: 'Configure Kubelet EvictionHard Memory Available'
+
+description: 'Memory Available for the EvictionHard threshold to trigger.'
+
+type: string
+
+operator: equals
+
+interactive: false
+
+options:
+ default: 200Mi
diff --git a/applications/openshift/kubelet/var_kubelet_evictionhard_nodefs_available.var b/applications/openshift/kubelet/var_kubelet_evictionhard_nodefs_available.var
new file mode 100644
index 000000000000..34b8fecd3a4b
--- /dev/null
+++ b/applications/openshift/kubelet/var_kubelet_evictionhard_nodefs_available.var
@@ -0,0 +1,18 @@
+documentation_complete: true
+
+title: 'Configure Kubelet EvictionHard NodeFS Available'
+
+description: 'Node FS Available for the EvictionHard threshold to trigger.'
+
+type: string
+
+operator: equals
+
+interactive: false
+
+options:
+ default: "5%"
+ 5pc: "5%"
+ 10pc: "10%"
+ 15pc: "15%"
+ 20pc: "20%"
diff --git a/applications/openshift/kubelet/var_kubelet_evictionhard_nodefs_inodesfree.var b/applications/openshift/kubelet/var_kubelet_evictionhard_nodefs_inodesfree.var
new file mode 100644
index 000000000000..b6f11248f771
--- /dev/null
+++ b/applications/openshift/kubelet/var_kubelet_evictionhard_nodefs_inodesfree.var
@@ -0,0 +1,19 @@
+documentation_complete: true
+
+title: 'Configure Kubelet EvictionHard Node FS inodes Free'
+
+description: 'Node FS inodes Free for the EvictionHard threshold to trigger.'
+
+type: string
+
+operator: equals
+
+interactive: false
+
+options:
+ default: "4%"
+ 4pc: "4%"
+ 5pc: "5%"
+ 10pc: "10%"
+ 15pc: "15%"
+ 20pc: "20%"
diff --git a/applications/openshift/kubelet/var_kubelet_evictionsoft_imagefs_available.var b/applications/openshift/kubelet/var_kubelet_evictionsoft_imagefs_available.var
new file mode 100644
index 000000000000..dccdc7c5878f
--- /dev/null
+++ b/applications/openshift/kubelet/var_kubelet_evictionsoft_imagefs_available.var
@@ -0,0 +1,18 @@
+documentation_complete: true
+
+title: 'Configure Kubelet EvictionSoft Image FS Available'
+
+description: 'Image FS Available for the EvictionSoft threshold to trigger.'
+
+type: string
+
+operator: equals
+
+interactive: false
+
+options:
+ default: "15%"
+ 5pc: "5%"
+ 10pc: "10%"
+ 15pc: "15%"
+ 20pc: "20%"
diff --git a/applications/openshift/kubelet/var_kubelet_evictionsoft_imagefs_inodesfree.var b/applications/openshift/kubelet/var_kubelet_evictionsoft_imagefs_inodesfree.var
new file mode 100644
index 000000000000..7ff3a9728fd7
--- /dev/null
+++ b/applications/openshift/kubelet/var_kubelet_evictionsoft_imagefs_inodesfree.var
@@ -0,0 +1,18 @@
+documentation_complete: true
+
+title: 'Configure Kubelet EvictionSoft Image FS inodes Free'
+
+description: 'Image FS inodes Free for the EvictionSoft threshold to trigger.'
+
+type: string
+
+operator: equals
+
+interactive: false
+
+options:
+ default: "10%"
+ 5pc: "5%"
+ 10pc: "10%"
+ 15pc: "15%"
+ 20pc: "20%"
diff --git a/applications/openshift/kubelet/var_kubelet_evictionsoft_memory_available.var b/applications/openshift/kubelet/var_kubelet_evictionsoft_memory_available.var
index 607631aea2e5..a570503b8197 100644
--- a/applications/openshift/kubelet/var_kubelet_evictionsoft_memory_available.var
+++ b/applications/openshift/kubelet/var_kubelet_evictionsoft_memory_available.var
@@ -11,4 +11,4 @@ operator: equals
interactive: false
options:
- default: 500Mi
+ default: 500Mi
diff --git a/applications/openshift/kubelet/var_kubelet_evictionsoft_nodefs_available.var b/applications/openshift/kubelet/var_kubelet_evictionsoft_nodefs_available.var
new file mode 100644
index 000000000000..907b357a92ee
--- /dev/null
+++ b/applications/openshift/kubelet/var_kubelet_evictionsoft_nodefs_available.var
@@ -0,0 +1,18 @@
+documentation_complete: true
+
+title: 'Configure Kubelet EvictionSoft NodeFS Available'
+
+description: 'Node FS Available for the EvictionSoft threshold to trigger.'
+
+type: string
+
+operator: equals
+
+interactive: false
+
+options:
+ default: "10%"
+ 5pc: "5%"
+ 10pc: "10%"
+ 15pc: "15%"
+ 20pc: "20%"
diff --git a/applications/openshift/kubelet/var_kubelet_evictionsoft_nodefs_inodesfree.var b/applications/openshift/kubelet/var_kubelet_evictionsoft_nodefs_inodesfree.var
new file mode 100644
index 000000000000..cff89fc596f1
--- /dev/null
+++ b/applications/openshift/kubelet/var_kubelet_evictionsoft_nodefs_inodesfree.var
@@ -0,0 +1,18 @@
+documentation_complete: true
+
+title: 'Configure Kubelet EvictionSoft Node FS inodes Free'
+
+description: 'Node FS inodes Free for the EvictionSoft threshold to trigger.'
+
+type: string
+
+operator: equals
+
+interactive: false
+
+options:
+ default: "5%"
+ 5pc: "5%"
+ 10pc: "10%"
+ 15pc: "15%"
+ 20pc: "20%"
diff --git a/ocp-resources/kubelet-config-mc.yaml b/ocp-resources/kubelet-config-mc.yaml
index 99f0a69cab61..96dadd6e5e40 100644
--- a/ocp-resources/kubelet-config-mc.yaml
+++ b/ocp-resources/kubelet-config-mc.yaml
@@ -12,6 +12,28 @@ spec:
kubeletConfig:
protectKernelDefaults: true
eventRecordQPS: 10
+ evictionSoft:
+ memory.available: "500Mi"
+ nodefs.available: "10%"
+ nodefs.inodesFree: "5%"
+ imagefs.available: "15%"
+ imagefs.inodesFree: "10%"
+ evictionSoftGracePeriod:
+ memory.available: "1m30s"
+ nodefs.available: "1m30s"
+ nodefs.inodesFree: "1m30s"
+ imagefs.available: "1m30s"
+ imagefs.inodesFree: "1m30s"
+ evictionHard:
+ memory.available: "200Mi"
+ nodefs.available: "5%"
+ nodefs.inodesFree: "4%"
+ imagefs.available: "10%"
+ imagefs.inodesFree: "5%"
+ evictionPressureTransitionPeriod: 0s
+ imageMinimumGCAge: 5m
+ imageGCHighThresholdPercent: 80
+ imageGCLowThresholdPercent: 75
---
apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
@@ -24,3 +46,25 @@ spec:
kubeletConfig:
protectKernelDefaults: true
eventRecordQPS: 10
+ evictionSoft:
+ memory.available: "500Mi"
+ nodefs.available: "10%"
+ nodefs.inodesFree: "5%"
+ imagefs.available: "15%"
+ imagefs.inodesFree: "10%"
+ evictionSoftGracePeriod:
+ memory.available: "1m30s"
+ nodefs.available: "1m30s"
+ nodefs.inodesFree: "1m30s"
+ imagefs.available: "1m30s"
+ imagefs.inodesFree: "1m30s"
+ evictionHard:
+ memory.available: "200Mi"
+ nodefs.available: "5%"
+ nodefs.inodesFree: "4%"
+ imagefs.available: "10%"
+ imagefs.inodesFree: "5%"
+ evictionPressureTransitionPeriod: 0s
+ imageMinimumGCAge: 5m
+ imageGCHighThresholdPercent: 80
+ imageGCLowThresholdPercent: 75
diff --git a/ocp4/profiles/cis-node.profile b/ocp4/profiles/cis-node.profile
index e17595e19e80..a0a7f85c4237 100644
--- a/ocp4/profiles/cis-node.profile
+++ b/ocp4/profiles/cis-node.profile
@@ -113,7 +113,17 @@ selections:
# 1.1.21 Ensure that the OpenShift PKI key file permissions are set to 600
- file_permissions_openshift_pki_key_files
#### 1.3 Controller Manager
- # 1.3.1 Ensure that garbage collection is configured as appropriate (Manual)
+ # 1.3.1 Ensure that garbage collection is configured as appropriate
+ - kubelet_eviction_thresholds_set_soft_memory_available
+ - kubelet_eviction_thresholds_set_soft_nodefs_available
+ - kubelet_eviction_thresholds_set_soft_nodefs_inodesfree
+ - kubelet_eviction_thresholds_set_soft_imagefs_available
+ - kubelet_eviction_thresholds_set_soft_imagefs_inodesfree
+ - kubelet_eviction_thresholds_set_hard_memory_available
+ - kubelet_eviction_thresholds_set_hard_nodefs_available
+ - kubelet_eviction_thresholds_set_hard_nodefs_inodesfree
+ - kubelet_eviction_thresholds_set_hard_imagefs_available
+ - kubelet_eviction_thresholds_set_hard_imagefs_inodesfree
### 2 etcd
# 2.7 Ensure that a unique Certificate Authority is used for etcd
@@ -123,7 +133,6 @@ selections:
###
#### 3.2 Logging
# 3.2.1 Ensure that a minimal audit policy is created
- - kubelet_eviction_thresholds_set_soft_memory_available
### 4 Worker Nodes
###
diff --git a/shared/references/cce-redhat-avail.txt b/shared/references/cce-redhat-avail.txt
index 1df5db35b51f..a6439a570f94 100644
--- a/shared/references/cce-redhat-avail.txt
+++ b/shared/references/cce-redhat-avail.txt
@@ -494,34 +494,25 @@ CCE-84115-5
CCE-84116-3
CCE-84117-1
CCE-84118-9
-CCE-84119-7
CCE-84120-5
CCE-84121-3
CCE-84122-1
-CCE-84123-9
CCE-84124-7
CCE-84125-4
CCE-84126-2
-CCE-84127-0
CCE-84128-8
CCE-84130-4
CCE-84131-2
-CCE-84132-0
CCE-84133-8
CCE-84134-6
-CCE-84135-3
CCE-84136-1
CCE-84137-9
-CCE-84138-7
CCE-84139-5
CCE-84140-3
-CCE-84141-1
CCE-84142-9
CCE-84143-7
-CCE-84144-5
CCE-84145-2
CCE-84146-0
-CCE-84147-8
CCE-84149-4
CCE-84150-2
CCE-84151-0