From a717d6d4c1533ed13463bf80a74c0dcb686fc687 Mon Sep 17 00:00:00 2001 From: rolfedh Date: Mon, 8 Mar 2021 08:40:03 -0500 Subject: [PATCH] Manual CP of Rhdevdocs 2752 #30170 to enterprise-4.5 --- modules/cluster-logging-about-crd.adoc | 9 +++++---- modules/cluster-logging-deploy-cli.adoc | 9 +++++---- modules/cluster-logging-deploy-console.adoc | 9 +++++---- modules/cluster-logging-logstore-limits.adoc | 20 ++++++++++---------- 4 files changed, 25 insertions(+), 22 deletions(-) diff --git a/modules/cluster-logging-about-crd.adoc b/modules/cluster-logging-about-crd.adoc index 81c28d8e83b0..4a3bca94a358 100644 --- a/modules/cluster-logging-about-crd.adoc +++ b/modules/cluster-logging-about-crd.adoc @@ -43,10 +43,11 @@ spec: request: memory: 8G proxy: - limits: - memory: 100Mi - requests: - memory: 100Mi + resources: + limits: + memory: 256Mi + requests: + memory: 256Mi visualization: type: "kibana" kibana: diff --git a/modules/cluster-logging-deploy-cli.adoc b/modules/cluster-logging-deploy-cli.adoc index 12cc1dbf5ca2..98f3e9e84640 100644 --- a/modules/cluster-logging-deploy-cli.adoc +++ b/modules/cluster-logging-deploy-cli.adoc @@ -364,10 +364,11 @@ spec: requests: memory: "8Gi" proxy: <8> - limits: - memory: 256Mi - requests: - memory: 256Mi + resources: + limits: + memory: 256Mi + requests: + memory: 256Mi redundancyPolicy: "SingleRedundancy" visualization: type: "kibana" <9> diff --git a/modules/cluster-logging-deploy-console.adoc b/modules/cluster-logging-deploy-console.adoc index 7527475a8cea..f4693f17158d 100644 --- a/modules/cluster-logging-deploy-console.adoc +++ b/modules/cluster-logging-deploy-console.adoc @@ -196,10 +196,11 @@ spec: storage: storageClassName: "" <6> size: 200G - resources: <7> - requests: - memory: "8Gi" - proxy: <8> + resources: <7> + requests: + memory: "8Gi" + proxy: <8> + resources: limits: memory: 256Mi requests: diff --git a/modules/cluster-logging-logstore-limits.adoc b/modules/cluster-logging-logstore-limits.adoc index 5718f1bf0cfb..49d21575271f 100644 --- a/modules/cluster-logging-logstore-limits.adoc +++ b/modules/cluster-logging-logstore-limits.adoc @@ -3,7 +3,7 @@ // * logging/cluster-logging-elasticsearch.adoc [id="cluster-logging-logstore-limits_{context}"] -= Configuring CPU and memory requests for the log store += Configuring CPU and memory requests for the log store Each component specification allows for adjustments to both the CPU and memory requests. You should not have to manually adjust these values as the Elasticsearch @@ -14,7 +14,7 @@ Operator sets values sufficient for your environment. In large-scale clusters, the default memory limit for the Elasticsearch proxy container might not be sufficient, causing the proxy container to be OOMKilled. If you experience this issue, increase the memory requests and limits for the Elasticsearch proxy. ==== -Each Elasticsearch node can operate with a lower memory setting though this is *not* recommended for production deployments. +Each Elasticsearch node can operate with a lower memory setting though this is *not* recommended for production deployments. For production use, you should have no less than the default 16Gi allocated to each pod. Preferably you should allocate as much as possible, up to 64Gi per pod. .Prerequisites @@ -48,16 +48,17 @@ spec: cpu: "1" memory: "64Gi" proxy: <2> - limits: - memory: 100Mi - requests: - memory: 100Mi + resources: + limits: + memory: 100Mi + requests: + memory: 100Mi ---- <1> Specify the CPU and memory requests for Elasticsearch as needed. 
If you leave these values blank, the Elasticsearch Operator sets default values that should be sufficient for most deployments. The default values are `16Gi` for the memory request and `1` for the CPU request.
<2> Specify the CPU and memory requests for the Elasticsearch proxy as needed. If you leave these values blank, the Elasticsearch Operator sets default values that should be sufficient for most deployments. The default values are `256Mi` for the memory request and `100m` for the CPU request.
 
-If you adjust the amount of Elasticsearch memory, you must change both the request value and the limit value. 
+If you adjust the amount of Elasticsearch memory, you must change both the request value and the limit value.
 
 For example:
 
@@ -72,6 +73,5 @@ For example:
       memory: "32Gi"
 ----
 
-Kubernetes generally adheres the node configuration and does not allow Elasticsearch to use the specified limits.
-Setting the same value for the `requests` and `limits` ensures that Elasticsearch can use the CPU and memory you want, assuming the node has the CPU and memory available.
-
+Kubernetes generally adheres to the node configuration and does not allow Elasticsearch to use the specified limits.
+Setting the same value for the `requests` and `limits` ensures that Elasticsearch can use the CPU and memory you want, assuming the node has the CPU and memory available.
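
All four hunks make the same structural fix: the proxy container's memory limits and requests move under a `resources` key, mirroring the existing `elasticsearch.resources` stanza. The following is a minimal sketch of the resulting log store section, assuming the default `instance` CR in the `openshift-logging` project; the sizing values are illustrative, not prescriptive:

[source,yaml]
----
apiVersion: logging.openshift.io/v1
kind: ClusterLogging
metadata:
  name: instance            # default CR name; adjust if yours differs
  namespace: openshift-logging
spec:
  logStore:
    type: "elasticsearch"
    elasticsearch:
      nodeCount: 3
      resources:            # resources for the Elasticsearch container itself
        requests:
          memory: "16Gi"    # production minimum recommended by the module text
      proxy:
        resources:          # after this patch, limits/requests nest under resources
          limits:
            memory: 256Mi   # raise this if the proxy container is OOMKilled
          requests:
            memory: 256Mi   # keep equal to the limit, per the module's guidance
----

To apply such a change to a running deployment, you would edit the CR in place, for example with `oc edit ClusterLogging instance -n openshift-logging`, and let the Operator reconcile the Elasticsearch pods.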