From 576162a29ba9290c67533d71a763b3e457cfd059 Mon Sep 17 00:00:00 2001
From: Luis Tomas Bolivar
Date: Mon, 6 Jul 2020 14:14:14 +0200
Subject: [PATCH 1/6] OpenStack: Add scaling information for API and Ingress

---
 .../installation-osp-kuryr-api-scaling.adoc   | 86 +++++++++++++++++
 ...nstallation-osp-kuryr-ingress-scaling.adoc | 95 +++++++++++++++++++
 networking/load-balancing-openstack.adoc      |  4 +-
 3 files changed, 184 insertions(+), 1 deletion(-)
 create mode 100644 modules/installation-osp-kuryr-api-scaling.adoc
 create mode 100644 modules/installation-osp-kuryr-ingress-scaling.adoc

diff --git a/modules/installation-osp-kuryr-api-scaling.adoc b/modules/installation-osp-kuryr-api-scaling.adoc
new file mode 100644
index 000000000000..f8da6dba50e1
--- /dev/null
+++ b/modules/installation-osp-kuryr-api-scaling.adoc
@@ -0,0 +1,86 @@
+// Module included in the following assemblies:
//
// * networking/openstack/load-balancing-openstack.adoc

[id="installation-osp-kuryr-octavia-configure{context}"]
= Scaling API using {rh-openstack} Octavia

API can be scaled on a {product-title} cluster running on {rh-openstack} with
Octavia support.
Octavia is a requirement to deploy {product-title} with Kuryr, as Kuryr uses it
to create the services, but it is not required when installing without Kuryr. So
the first step is to ensure that Octavia is available.
If it is, it can be leveraged to create extra API IPs and better scale with the
load. This makes it possible to remove the API bottleneck, as currently the
traffic enters through the master node that holds the API VIP.

If the {product-title} cluster was installed with Kuryr, the Cluster Network
Operator already creates an internal Octavia load balancer (Amphora driver) that
is used by the pods that want to reach the Kubernetes service IP (e.g.,
172.30.0.1). Note: components using the api-int DNS record do not get to the API
through this load balancer.

As this load balancer already exists, we can simply associate the API floating IP
with it:
+
----
$ openstack floating ip unset $API_FIP
$ openstack floating ip set --port $(openstack loadbalancer show -c vip_port_id
-f value ${OCP_CLUSTER}-kuryr-api-loadbalancer) $API_FIP
----


On the other hand, if the {product-title} cluster was installed without Kuryr, or
if extra load balancers want to be created, then the next steps can be used:

. Create an Octavia load balancer (Amphora, as it requires L7 capabilities). Note
that the subnet selected is the same one where the worker node VMs are running.
+
----
$ openstack loadbalancer create --name API_OCP_CLUSTER --vip-subnet-id WORKER_VMS_SUBNET_ID
----

. Wait for the load balancer to be in ACTIVE status, and then create the listeners:
+
----
$ openstack loadbalancer listener create --name API_OCP_CLUSTER_6443 --protocol HTTPS --protocol-port 6443 API_OCP_CLUSTER
----

. Create the pool. Note that the load balancer algorithm selected is ROUND_ROBIN and
that pool session persistence is enabled:
+
----
$ openstack loadbalancer pool create --name API_OCP_CLUSTER_pool_6443 --lb-algorithm ROUND_ROBIN --session-persistence type=SOURCE_IP --listener API_OCP_CLUSTER_6443 --protocol HTTPS
----

. Create a health monitor to ensure that master nodes are available (note: if the
Octavia version supports HTTP monitors, one can be used to check /healthz or /readyz instead):
+
----
$ openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type TCP API_OCP_CLUSTER_pool_6443
----
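+
For example, if the Octavia version supports HTTPS health monitors, a variant
that probes the kube-apiserver `/readyz` endpoint directly could be used instead.
This is a sketch only; `--url-path` and `--expected-codes` are standard Octavia
CLI options, but verify them against your Octavia release:
+
----
$ openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type HTTPS --url-path /readyz --expected-codes 200 API_OCP_CLUSTER_pool_6443
----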

. Add the master nodes as members of the load balancer pool:
+
----
$ for SERVER in MASTER-0-IP MASTER-1-IP MASTER-2-IP
do
  openstack loadbalancer member create --address $SERVER --protocol-port 6443 API_OCP_CLUSTER_pool_6443
done
----

. Remove the API_FIP from the current port if the same IP wants to be reused (otherwise a new one can be created):
+
----
$ openstack floating ip unset $API_FIP
----

. Add the API_FIP to the created load balancer VIP:
+
----
$ openstack floating ip set --port $(openstack loadbalancer show -c vip_port_id -f value API_OCP_CLUSTER) $API_FIP
----

Note that with the Octavia Amphora driver the traffic will still go through a single VM (in this case the Amphora VM).
So, to avoid this, the above steps can be replicated as many times as desired, creating several API load balancers to achieve better performance at scale.
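
To confirm that the floating IP now points at the load balancer VIP, the floating
IP details can be inspected (a sketch; `openstack floating ip show` is a standard
OpenStack CLI command, and `$API_FIP` is the same variable used above):

----
$ openstack floating ip show $API_FIP
----

The `port_id` and `fixed_ip_address` fields should reference the load balancer's
VIP port and address.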

diff --git a/modules/installation-osp-kuryr-ingress-scaling.adoc b/modules/installation-osp-kuryr-ingress-scaling.adoc
new file mode 100644
index 000000000000..38ce7a5a82e2
--- /dev/null
+++ b/modules/installation-osp-kuryr-ingress-scaling.adoc
@@ -0,0 +1,95 @@
+// Module included in the following assemblies:
//
// * networking/openstack/load-balancing-openstack.adoc

[id="installation-osp-kuryr-octavia-configure{context}"]
= Scaling Ingress using {rh-openstack} Octavia on Kuryr deployments

Ingress can be scaled on a {product-title} cluster that uses Kuryr by using
Octavia load balancers.
Octavia is a requirement to deploy {product-title} with Kuryr, as Kuryr uses it
to create the services.
This can be leveraged to create extra Ingress APIs and better scale with the
load.
This makes it possible to remove the Ingress bottleneck, as currently the traffic
enters through the worker node that holds the Ingress VIP.

The steps to follow are these:

. Copy the current internal router service:
+
----
$ oc -n openshift-ingress get svc router-internal-default -o yaml > external_router.yaml
----

. Change the `name` (e.g., `router-external-default`) and the `type` to
`LoadBalancer` so that Kuryr associates a floating IP with the load balancer VIP.
Also remove all the extra information that is not needed, such as timestamps and IDs:
+
----
$ cat external_router.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    ingresscontroller.operator.openshift.io/owning-ingresscontroller: default
  name: router-external-default
  namespace: openshift-ingress
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: http
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  - name: metrics
    port: 1936
    protocol: TCP
    targetPort: 1936
  selector:
    ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default
  sessionAffinity: None
  type: LoadBalancer
----

. Create the above service:
+
----
$ oc apply -f external_router.yaml
----

. Check the `external-ip` that the service got, and ensure that it is the same as the one associated with the created load balancer:
+
----
$ oc -n openshift-ingress get svc
NAME                      TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S)                                     AGE
router-external-default   LoadBalancer   172.30.235.33                   80:30112/TCP,443:32359/TCP,1936:30317/TCP   10s
router-internal-default   ClusterIP      172.30.115.123                  80/TCP,443/TCP,1936/TCP                     22h

$ oc -n openshift-ingress get svc
NAME                      TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S)                                     AGE
router-external-default   LoadBalancer   172.30.235.33    10.46.22.161   80:30112/TCP,443:32359/TCP,1936:30317/TCP   3m38s
router-internal-default   ClusterIP      172.30.115.123                  80/TCP,443/TCP,1936/TCP                     22h

$ openstack loadbalancer list | grep router-external
| 21bf6afe-b498-4a16-a958-3229e83c002c | openshift-ingress/router-external-default | 66f3816acf1b431691b8d132cc9d793c | 172.30.235.33 | ACTIVE | octavia |

$ openstack floating ip list | grep 172.30.235.33
| e2f80e97-8266-4b69-8636-e58bacf1879e | 10.46.22.161 | 172.30.235.33 | 655e7122-806a-4e0a-a104-220c6e17bda6 | a565e55a-99e7-4d15-b4df-f9d7ee8c9deb | 66f3816acf1b431691b8d132cc9d793c |
----

The `external-ip` is the new Ingress IP to use externally.

Note that if Kuryr uses the Octavia Amphora driver, the traffic will still go
through a single VM (in this case the Amphora VM). So, to avoid this, the above
steps can be replicated as many times as desired, creating different Ingress
Floating IPs that can be used to achieve better performance.

By contrast, if the Octavia OVN driver is used, the traffic is already distributed
and it will enter the cluster through the OVN gateway nodes (same as any other
N/S traffic). It is also possible to create several external
(LoadBalancer type) services with it too.
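
As a quick check that the new Ingress IP serves traffic, an HTTP request can be
sent to it with a Host header for one of the cluster's routes (a sketch; the
route host below is hypothetical and must be replaced with a real one):

----
$ curl -s -o /dev/null -w "%{http_code}\n" -H "Host: console-openshift-console.apps.example.com" http://10.46.22.161
----

A `200` or a redirect code such as `302` indicates that the router is answering
through the new floating IP.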

diff --git a/networking/load-balancing-openstack.adoc b/networking/load-balancing-openstack.adoc
index 966afb8209a5..3fb425b2d09b 100644
--- a/networking/load-balancing-openstack.adoc
+++ b/networking/load-balancing-openstack.adoc
@@ -5,4 +5,6 @@ include::modules/common-attributes.adoc[]

toc::[]

-include::modules/installation-osp-kuryr-octavia-upgrade.adoc[leveloffset=+1]
\ No newline at end of file
+include::modules/installation-osp-kuryr-octavia-upgrade.adoc[leveloffset=+1]
+include::modules/installation-osp-kuryr-api-scaling.adoc[leveloffset=+1]
+include::modules/installation-osp-kuryr-ingress-scaling.adoc[leveloffset=+1]

From 304837b4eb36be9a8d634ba4332024679ac8505f Mon Sep 17 00:00:00 2001
From: Max Bridges
Date: Wed, 26 Aug 2020 12:10:36 -0400
Subject: [PATCH 2/6] Edits for task mod

---
 ...nstallation-osp-kuryr-ingress-scaling.adoc | 103 ++++++++++++------
 1 file changed, 68 insertions(+), 35 deletions(-)

diff --git a/modules/installation-osp-kuryr-ingress-scaling.adoc b/modules/installation-osp-kuryr-ingress-scaling.adoc
index 38ce7a5a82e2..832aa9bfd4e6 100644
--- a/modules/installation-osp-kuryr-ingress-scaling.adoc
+++ b/modules/installation-osp-kuryr-ingress-scaling.adoc
@@ -2,38 +2,44 @@
//
// * networking/openstack/load-balancing-openstack.adoc

-[id="installation-osp-kuryr-octavia-configure{context}"]
+[id="installation-osp-kuryr-octavia-configure_{context}"]
= Scaling Ingress using {rh-openstack} Octavia on Kuryr deployments

-Ingress can be scaled on a {product-title} cluster that uses Kuryr by using
-Octavia load balancers.
-Octavia is a requirement to deploy {product-title} with Kuryr, as Kuryr uses it
-to create the services.
-This can be leveraged to create extra Ingress APIs and better scale with the
-load.
-This makes it possible to remove the Ingress bottleneck, as currently the traffic
-enters through the worker node that holds the Ingress VIP.
+You can use Octavia load balancers to scale Ingress controllers on clusters that use Kuryr.

-The steps to follow are these:
+// Potentially repurpose into concept module
+// Octavia is a requirement to deploy {product-title} with Kuryr, as Kuryr uses it
+// to create the services.
+// This can be leveraged to create extra Ingress APIs and better scale with the
+// load.
+// This makes it possible to remove the Ingress bottleneck, as currently the traffic
+// enters through the worker node that holds the Ingress VIP.

-. Copy the current internal router service:
+.Prerequisites
+
+* Your {product-title} cluster uses Kuryr.
+
+.Procedure
+
+. To copy the current internal router service, on a command line, enter:
+
+[source,terminal]
----
$ oc -n openshift-ingress get svc router-internal-default -o yaml > external_router.yaml
----

-. Change the `name` (e.g., `router-external-default`) and the `type` to
-`LoadBalancer` so that Kuryr associates a floating IP with the load balancer VIP.
-Also remove all the extra information that is not needed, such as timestamps and IDs:
+. In the file `external_router.yaml`, change the values of `metadata.name` and `spec.type` to
+`LoadBalancer`.
+
+[source,yaml]
+.Example router file
----
-$ cat external_router.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    ingresscontroller.operator.openshift.io/owning-ingresscontroller: default
-  name: router-external-default
+  name: LoadBalancer <1>
  namespace: openshift-ingress
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: http
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  - name: metrics
    port: 1936
    protocol: TCP
    targetPort: 1936
  selector:
    ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default
  sessionAffinity: None
-  type: LoadBalancer
+  type: LoadBalancer <2>
----
+<1> Ensure that this value is `LoadBalancer`.
+<2> Ensure that this value is `LoadBalancer`.

-. Create the above service:
+[NOTE]
+====
+You can also delete timestamps and other information that is irrelevant to load balancing.
+====
+
+. From a command line, create a service from the `external_router.yaml` file:
+
+[source,terminal]
----
$ oc apply -f external_router.yaml
----

-. Check the `external-ip` that the service got, and ensure that it is the same as the one associated with the created load balancer:
+. Verify that the service's external IP address is the same as the one that is associated with the load balancer:
+.. On a command line, retrieve the service's external IP address:
+
+[source,terminal]
----
$ oc -n openshift-ingress get svc
-NAME                      TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S)                                     AGE
-router-external-default   LoadBalancer   172.30.235.33                   80:30112/TCP,443:32359/TCP,1936:30317/TCP   10s
-router-internal-default   ClusterIP      172.30.115.123                  80/TCP,443/TCP,1936/TCP                     22h
-
-$ oc -n openshift-ingress get svc
+----
++
+[source,terminal]
+.Example output
+----
NAME                      TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S)                                     AGE
router-external-default   LoadBalancer   172.30.235.33    10.46.22.161   80:30112/TCP,443:32359/TCP,1936:30317/TCP   3m38s
router-internal-default   ClusterIP      172.30.115.123                  80/TCP,443/TCP,1936/TCP                     22h
+----

.. Retrieve the load balancer's IP address:
+
+[source,terminal]
+----
$ openstack loadbalancer list | grep router-external
-| 21bf6afe-b498-4a16-a958-3229e83c002c | openshift-ingress/router-external-default | 66f3816acf1b431691b8d132cc9d793c | 172.30.235.33 | ACTIVE | octavia |
+----
++
+.Example output
+[source,terminal]
+----
+| 21bf6afe-b498-4a16-a958-3229e83c002c | openshift-ingress/router-external-default | 66f3816acf1b431691b8d132cc9d793c | 172.30.235.33 | ACTIVE | octavia |
+----

.. Verify that the addresses you retrieved in the previous steps are associated in the floating IP list:
+
+[source,terminal]
+----
$ openstack floating ip list | grep 172.30.235.33
-| e2f80e97-8266-4b69-8636-e58bacf1879e | 10.46.22.161 | 172.30.235.33 | 655e7122-806a-4e0a-a104-220c6e17bda6 | a565e55a-99e7-4d15-b4df-f9d7ee8c9deb | 66f3816acf1b431691b8d132cc9d793c |
+----
++
+.Example output
+[source,terminal]
+----
+| e2f80e97-8266-4b69-8636-e58bacf1879e | 10.46.22.161 | 172.30.235.33 | 655e7122-806a-4e0a-a104-220c6e17bda6 | a565e55a-99e7-4d15-b4df-f9d7ee8c9deb | 66f3816acf1b431691b8d132cc9d793c |
+----

-The `external-ip` is the new Ingress IP to use externally.
-
-Note that if Kuryr uses the Octavia Amphora driver, the traffic will still go
-through a single VM (in this case the Amphora VM). So, to avoid this, the above
-steps can be replicated as many times as desired, creating different Ingress
-Floating IPs that can be used to achieve better performance.
+You can now use the value of `EXTERNAL-IP` as the new Ingress address.

-By contrast, if the Octavia OVN driver is used, the traffic is already distributed
-and it will enter the cluster through the OVN gateway nodes (same as any other
-N/S traffic). It is also possible to create several external
-(LoadBalancer type) services with it too.
+[NOTE]
+====
+If Kuryr uses the Octavia Amphora driver, all traffic is routed through a single Amphora VM.

+You can perform this procedure multiple times to create additional floating IP addresses, which can alleviate the bottleneck.
+====

From a7b3d3fd7dc7f106e1eb99ef6fe4338a42f249f3 Mon Sep 17 00:00:00 2001
From: Max Bridges
Date: Wed, 26 Aug 2020 12:10:36 -0400
Subject: [PATCH 3/6] Edits for task mod

---
 modules/installation-osp-api-octavia.adoc     | 18 ++++
 modules/installation-osp-api-scaling.adoc     | 78 ++++++++++++++
 .../installation-osp-kuryr-api-scaling.adoc   | 86 ++++-----------
 ...nstallation-osp-kuryr-ingress-scaling.adoc | 100 +++++++++++-------
 networking/load-balancing-openstack.adoc      |   6 +-
 5 files changed, 183 insertions(+), 105 deletions(-)
 create mode 100644 modules/installation-osp-api-octavia.adoc
 create mode 100644 modules/installation-osp-api-scaling.adoc

diff --git a/modules/installation-osp-api-octavia.adoc b/modules/installation-osp-api-octavia.adoc
new file mode 100644
index 000000000000..ace611313127
--- /dev/null
+++ b/modules/installation-osp-api-octavia.adoc
@@ -0,0 +1,18 @@
+// Module included in the following assemblies:
+//
+// * networking/openstack/load-balancing-openstack.adoc
+
+[id="installation-osp-api-octavia_{context}"]
+= Scaling clusters for application traffic by using Octavia
+
+{product-title} clusters that run on {rh-openstack-first} can use the Octavia
+load balancing service to distribute traffic across multiple VMs or floating IP
+addresses. This feature mitigates the bottleneck that single machines or
+addresses create.
+
+If your cluster uses Kuryr, the Cluster Network Operator created an internal
+Octavia load balancer at deployment. You can use this load balancer for
+application network scaling.
+
+If your cluster does not use Kuryr, you must create your own Octavia load
+balancer to use it for application network scaling.
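+
+If you are not sure whether Octavia is available in your deployment, one way to
+check from a command line is to list the enabled Octavia provider drivers (a
+sketch; it assumes that the OpenStack CLI and its load-balancer plugin are
+installed):
+
+[source,terminal]
+----
+$ openstack loadbalancer provider list
+----
+
+If the output includes a provider such as `amphora` or `ovn`, the corresponding
+driver can be used for the scaling procedures that follow.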
\ No newline at end of file
diff --git a/modules/installation-osp-api-scaling.adoc b/modules/installation-osp-api-scaling.adoc
new file mode 100644
index 000000000000..e3ab4a9b2266
--- /dev/null
+++ b/modules/installation-osp-api-scaling.adoc
@@ -0,0 +1,78 @@
+// Module included in the following assemblies:
+//
+// * networking/openstack/load-balancing-openstack.adoc
+
+[id="installation-osp-api-scaling_{context}"]
+= Scaling clusters by using Octavia
+
+If your cluster does not use Kuryr, create an Octavia load balancer and then
+configure your cluster to use it.
+
+.Prerequisites
+
+* Octavia is available on your {rh-openstack} deployment.
+
+.Procedure
+
+. From a command line, create an Octavia load balancer that uses the Amphora driver:
++
+[source,terminal]
+----
+$ openstack loadbalancer create --name API_OCP_CLUSTER --vip-subnet-id <subnet_ID>
+----
++
+You can use a name of your choice instead of `API_OCP_CLUSTER`.
+
+. After the load balancer becomes active, create listeners:
++
+[source,terminal]
+----
+$ openstack loadbalancer listener create --name API_OCP_CLUSTER_6443 --protocol HTTPS --protocol-port 6443 API_OCP_CLUSTER
+----
+
+. Create a pool that uses the round robin algorithm and has session persistence enabled:
++
+[source,terminal]
+----
+$ openstack loadbalancer pool create --name API_OCP_CLUSTER_pool_6443 --lb-algorithm ROUND_ROBIN --session-persistence type=SOURCE_IP --listener API_OCP_CLUSTER_6443 --protocol HTTPS
+----
+
+. To ensure that control plane machines are available, create a health monitor:
++
+[source,terminal]
+----
+$ openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type TCP API_OCP_CLUSTER_pool_6443
+----
+
+. Add the control plane machines as members of the load balancer pool:
++
+[source,terminal]
+----
+$ for SERVER in MASTER-0-IP MASTER-1-IP MASTER-2-IP
+do
+  openstack loadbalancer member create --address $SERVER --protocol-port 6443 API_OCP_CLUSTER_pool_6443
+done
+----
+
+. Optional: To reuse the cluster API floating IP address, unset it:
++
+[source,terminal]
+----
+$ openstack floating ip unset $API_FIP
+----
+
+. Add either the unset `API_FIP` or a new address to the created load balancer VIP:
++
+[source,terminal]
+----
+$ openstack floating ip set --port $(openstack loadbalancer show -c vip_port_id -f value API_OCP_CLUSTER) $API_FIP
+----
+
+Your cluster now uses Octavia for load balancing.
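+
+To verify that the API responds through the load balancer, you can query the
+kube-apiserver health endpoint over the floating IP (a sketch; it assumes that
+unauthenticated health checks are permitted, which is the default):
+
+[source,terminal]
+----
+$ curl -k https://$API_FIP:6443/readyz
+----
+
+A plain `ok` response indicates that API traffic is passing through the new load
+balancer.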
+==== \ No newline at end of file diff --git a/modules/installation-osp-kuryr-api-scaling.adoc b/modules/installation-osp-kuryr-api-scaling.adoc index f8da6dba50e1..2f878d800f6d 100644 --- a/modules/installation-osp-kuryr-api-scaling.adoc +++ b/modules/installation-osp-kuryr-api-scaling.adoc @@ -2,85 +2,39 @@ // // * networking/openstack/load-balancing-openstack.adoc -[id="installation-osp-kuryr-octavia-configure{context}"] -= Scaling API using {rh-openstack} Octavia +[id="installation-osp-kuryr-api-scaling_{context}"] += Scaling clusters that use Kuryr by using Octavia -API can be scaled on {product-title} cluster running on {rh-openstack} with -Octavia support -Octavia is a requirement to deploy {product-title} with Kuryr, as Kuryr uses it -to create the services, but when installing without Kuryr it is not. So first -step is to ensure Octavia is available. -If it is, it can be leveraged to create extra API IPs and better scale with the -load. This allows to remove the API bottleneck as currently the traffics enters -through the master node that holds the API VIP. +If your cluster uses Kuryr, associate your cluster's API floating IP address +with the pre-existing Octavia load balancer. -If {product-title} cluster was installed with Kuryr, the Cluster Network -Operator already creates an internal Octavia Load Balancer (Amphora driver) used -for by the pods that wants to reach the kubernetes service IP (e.g., -172.30.0.1). Note: components using the api-int DNS record do not get to the API -through this load balancer. +.Prerequisites -As this loadbalancer already exists, we can simply associate the API Floating IP -to it: -+ ----- -$ openstack floating ip unset $API_FIP -$ openstack floating ip set --port $(openstack loadbalancer show -c vip_port_id --f value ${OCP_CLUSTER}-kuryr-api-loadbalancer) $API_FIP ----- +* Your {product-title} cluster uses Kuryr. +* Octavia is available on your {rh-openstack} deployment. -On the other hand, if {product-title} cluster was installed without kuryr, or an -extra load balancer(s) wants to be created, then the next steps can be used: +.Procedure -. Create an Octavia Load Balancer (Amphora as it requires L7 capabilities). Note -the subnet selected is the same where the worker node VMs are running. -+ ----- -$ openstack loadbalancer create --name API_OCP_CLUSTER --vip-subnet-id WORKER_VMS_SUBNET_ID ----- - -. Wait for the loadbalancer to be in ACTIVE status, and then create the listeners -+ ----- -$ openstack loadbalancer listener create --name API_OCP_CLUSTER_6443 --protocol HTTPS--protocol-port 6443 API_OCP_CLUSTER ----- - -. Create the pool. Note the load balancer algorithm selected is ROUND_ROBIN and -the pool session persistence is enabled: -+ ----- -$ openstack loadbalancer pool create --name API_OCP_CLUSTER_pool_6443 --lb-algorithm ROUND_ROBIN --session-persistence type=SOURCE_IP --listener API_OCP_CLUSTER_6443 --protocol HTTPS ----- - -. Create a health monitor to ensure masters nodes are available (node, if Octavia -version supports HTTP monitors it can be used to check /healthz/readyz instead): -+ ----- -$ openstack loadbalancer healthmonitor create --delay 5 --max-retries 4 --timeout 10 --type TCP API_OCP_CLUSTER_pool_6443 ----- - -. Add the master nodes as members of the load balancer pool: -+ ----- -$ for SERVER in $(MASTER-0-IP MASTER-1-IP MASTER-2-IP) -do - openstack loadbalancer member create --address $SERVER --protocol-port 6443 API_OCP_CLUSTER_pool_6443 -done ----- - -. 
-. Remove the API_FIP from the current port if the same IP wants to be reused (otherwise a new one can be created):
+. Optional: From a command line, to reuse the cluster API floating IP address, unset it:
+
+[source,terminal]
----
$ openstack floating ip unset $API_FIP
----

-. Add the API_FIP to the created load balancer VIP:
+. Add either the unset `API_FIP` or a new address to the created load balancer VIP:
+
+[source,terminal]
----
-$ openstack floating ip set --port $(openstack loadbalancer show -c vip_port_id -f value API_OCP_CLUSTER) $API_FIP
+$ openstack floating ip set --port $(openstack loadbalancer show -c vip_port_id -f value ${OCP_CLUSTER}-kuryr-api-loadbalancer) $API_FIP
----

-Note that with the Octavia Amphora driver the traffic will still go through a single VM (in this case the Amphora VM).
-So, to avoid this, the above steps can be replicated as many times as desired, creating several API load balancers to achieve better performance at scale.
+Your cluster now uses Octavia for load balancing.
+
+[NOTE]
+====
+If Kuryr uses the Octavia Amphora driver, all traffic is routed through a single Amphora VM.

+You can repeat this procedure to create additional load balancers, which can alleviate the bottleneck.
+====
\ No newline at end of file
diff --git a/modules/installation-osp-kuryr-ingress-scaling.adoc b/modules/installation-osp-kuryr-ingress-scaling.adoc
index 38ce7a5a82e2..eb7ddac6e5ec 100644
--- a/modules/installation-osp-kuryr-ingress-scaling.adoc
+++ b/modules/installation-osp-kuryr-ingress-scaling.adoc
@@ -2,38 +2,38 @@
//
// * networking/openstack/load-balancing-openstack.adoc

-[id="installation-osp-kuryr-octavia-configure{context}"]
-= Scaling Ingress using {rh-openstack} Octavia on Kuryr deployments
+[id="installation-osp-kuryr-octavia-configure_{context}"]
+= Scaling for ingress traffic by using {rh-openstack} Octavia

-Ingress can be scaled on a {product-title} cluster that uses Kuryr by using
-Octavia load balancers.
-Octavia is a requirement to deploy {product-title} with Kuryr, as Kuryr uses it
-to create the services.
-This can be leveraged to create extra Ingress APIs and better scale with the
-load.
-This makes it possible to remove the Ingress bottleneck, as currently the traffic
-enters through the worker node that holds the Ingress VIP.
+You can use Octavia load balancers to scale Ingress controllers on clusters that use Kuryr.

-The steps to follow are these:
+.Prerequisites

-. Copy the current internal router service:
+* Your {product-title} cluster uses Kuryr.
+
+* Octavia is available on your {rh-openstack} deployment.
+
+.Procedure
+
+. To copy the current internal router service, on a command line, enter:
+
+[source,terminal]
----
$ oc -n openshift-ingress get svc router-internal-default -o yaml > external_router.yaml
----

. In the file `external_router.yaml`, change the value of `metadata.name` to a descriptive name and the value of `spec.type` to
`LoadBalancer`.
+
[source,yaml]
.Example router file
----
apiVersion: v1
kind: Service
metadata:
  labels:
    ingresscontroller.operator.openshift.io/owning-ingresscontroller: default
-  name: router-external-default
+  name: router-external-default <1>
  namespace: openshift-ingress
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: http
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  - name: metrics
    port: 1936
    protocol: TCP
    targetPort: 1936
  selector:
    ingresscontroller.operator.openshift.io/deployment-ingresscontroller: default
  sessionAffinity: None
-  type: LoadBalancer
+  type: LoadBalancer <2>
----
+<1> Ensure that this value is descriptive, like `router-external-default`.
+<2> Ensure that this value is `LoadBalancer`.

[NOTE]
====
You can delete timestamps and other information that is irrelevant to load balancing.
====

. From a command line, create a service from the `external_router.yaml` file:
+
[source,terminal]
----
$ oc apply -f external_router.yaml
----

. Verify that the service's external IP address is the same as the one that is associated with the load balancer:
.. On a command line, retrieve the service's external IP address:
+
[source,terminal]
----
$ oc -n openshift-ingress get svc
-NAME                      TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S)                                     AGE
-router-external-default   LoadBalancer   172.30.235.33                   80:30112/TCP,443:32359/TCP,1936:30317/TCP   10s
-router-internal-default   ClusterIP      172.30.115.123                  80/TCP,443/TCP,1936/TCP                     22h
-
-$ oc -n openshift-ingress get svc
+----
++
+[source,terminal]
+.Example output
+----
NAME                      TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S)                                     AGE
router-external-default   LoadBalancer   172.30.235.33    10.46.22.161   80:30112/TCP,443:32359/TCP,1936:30317/TCP   3m38s
router-internal-default   ClusterIP      172.30.115.123                  80/TCP,443/TCP,1936/TCP                     22h
+----

.. Retrieve the load balancer's IP address:
+
+[source,terminal]
+----
$ openstack loadbalancer list | grep router-external
-| 21bf6afe-b498-4a16-a958-3229e83c002c | openshift-ingress/router-external-default | 66f3816acf1b431691b8d132cc9d793c | 172.30.235.33 | ACTIVE | octavia |
+----
++
+.Example output
+[source,terminal]
+----
+| 21bf6afe-b498-4a16-a958-3229e83c002c | openshift-ingress/router-external-default | 66f3816acf1b431691b8d132cc9d793c | 172.30.235.33 | ACTIVE | octavia |
+----

.. Verify that the addresses you retrieved in the previous steps are associated with each other in the floating IP list:
+
+[source,terminal]
+----
$ openstack floating ip list | grep 172.30.235.33
-| e2f80e97-8266-4b69-8636-e58bacf1879e | 10.46.22.161 | 172.30.235.33 | 655e7122-806a-4e0a-a104-220c6e17bda6 | a565e55a-99e7-4d15-b4df-f9d7ee8c9deb | 66f3816acf1b431691b8d132cc9d793c |
+----
++
+.Example output
+[source,terminal]
+----
+| e2f80e97-8266-4b69-8636-e58bacf1879e | 10.46.22.161 | 172.30.235.33 | 655e7122-806a-4e0a-a104-220c6e17bda6 | a565e55a-99e7-4d15-b4df-f9d7ee8c9deb | 66f3816acf1b431691b8d132cc9d793c |
----

-The `external-ip` is the new Ingress IP to use externally.
-
-Note that if Kuryr uses the Octavia Amphora driver, the traffic will still go
-through a single VM (in this case the Amphora VM). So, to avoid this, the above
-steps can be replicated as many times as desired, creating different Ingress
-Floating IPs that can be used to achieve better performance.
+You can now use the value of `EXTERNAL-IP` as the new Ingress address.

+[NOTE]
+====
+If Kuryr uses the Octavia Amphora driver, all traffic is routed through a single Amphora VM.

-By contrast, if the Octavia OVN driver is used, the traffic is already distributed
-and it will enter the cluster through the OVN gateway nodes (same as any other
-N/S traffic). It is also possible to create several external
-(LoadBalancer type) services with it too.
+You can repeat this procedure to create additional load balancers, which can alleviate the bottleneck.
+====
\ No newline at end of file
diff --git a/networking/load-balancing-openstack.adoc b/networking/load-balancing-openstack.adoc
index 3fb425b2d09b..ac761bc16201 100644
--- a/networking/load-balancing-openstack.adoc
+++ b/networking/load-balancing-openstack.adoc
@@ -6,5 +6,7 @@ include::modules/common-attributes.adoc[]
toc::[]

include::modules/installation-osp-kuryr-octavia-upgrade.adoc[leveloffset=+1]
-include::modules/installation-osp-kuryr-api-scaling.adoc[leveloffset=+1]
-include::modules/installation-osp-kuryr-ingress-scaling.adoc[leveloffset=+1]
+include::modules/installation-osp-api-octavia.adoc[leveloffset=+1]
+include::modules/installation-osp-api-scaling.adoc[leveloffset=+2]
+include::modules/installation-osp-kuryr-api-scaling.adoc[leveloffset=+2]
+include::modules/installation-osp-kuryr-ingress-scaling.adoc[leveloffset=+1]
\ No newline at end of file

From ecbd10bf14a8f143949377f299cca8c6005f6fe8 Mon Sep 17 00:00:00 2001
From: Max Bridges
Date: Mon, 28 Sep 2020 14:25:35 -0400
Subject: [PATCH 4/6] Adding status note

---
 modules/installation-osp-api-scaling.adoc | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/modules/installation-osp-api-scaling.adoc b/modules/installation-osp-api-scaling.adoc
index e3ab4a9b2266..e875eb4ee22b 100644
--- a/modules/installation-osp-api-scaling.adoc
+++ b/modules/installation-osp-api-scaling.adoc
@@ -29,6 +29,11 @@ You can use a name of your choice instead of `API_OCP_CLUSTER`.
----
$ openstack loadbalancer listener create --name API_OCP_CLUSTER_6443 --protocol HTTPS --protocol-port 6443 API_OCP_CLUSTER
----
++
+[NOTE]
+====
+To view the load balancer's status, enter `openstack loadbalancer list`.
+====

. Create a pool that uses the round robin algorithm and has session persistence enabled:

From 542094625bb1ef35151b69890b26295e925075b4 Mon Sep 17 00:00:00 2001
From: Max Bridges <50179998+maxwelldb@users.noreply.github.com>
Date: Tue, 10 Nov 2020 09:59:06 -0500
Subject: [PATCH 5/6] Update modules/installation-osp-kuryr-ingress-scaling.adoc

---
 modules/installation-osp-kuryr-ingress-scaling.adoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/installation-osp-kuryr-ingress-scaling.adoc b/modules/installation-osp-kuryr-ingress-scaling.adoc
index 6e9252275e47..82d97b58c7eb 100644
--- a/modules/installation-osp-kuryr-ingress-scaling.adoc
+++ b/modules/installation-osp-kuryr-ingress-scaling.adoc
@@ -2,7 +2,7 @@
//
// * networking/openstack/load-balancing-openstack.adoc

-[id="installation-osp-kuryr-octavia-configure_{context}"]
+[id="installation-osp-kuryr-octavia-scale_{context}"]
= Scaling for ingress traffic by using {rh-openstack} Octavia

You can use Octavia load balancers to scale Ingress controllers on clusters that use Kuryr.

@@ -119,4 +119,4 @@
If Kuryr uses the Octavia Amphora driver, all traffic is routed through a single Amphora VM.

You can repeat this procedure to create additional load balancers, which can alleviate the bottleneck.
-====
\ No newline at end of file
+====

From be49298733d65b4667e23051913356927a6c0d54 Mon Sep 17 00:00:00 2001
From: Max Bridges <50179998+maxwelldb@users.noreply.github.com>
Date: Thu, 3 Dec 2020 09:51:22 -0500
Subject: [PATCH 6/6] Wording tweak

---
 modules/installation-osp-api-scaling.adoc | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/modules/installation-osp-api-scaling.adoc b/modules/installation-osp-api-scaling.adoc
index e875eb4ee22b..211f0cf2539a 100644
--- a/modules/installation-osp-api-scaling.adoc
+++ b/modules/installation-osp-api-scaling.adoc
@@ -5,8 +5,7 @@
[id="installation-osp-api-scaling_{context}"]
= Scaling clusters by using Octavia

-If your cluster does not use Kuryr, create an Octavia load balancer and then
-configure your cluster to use it.
+If you want to use multiple API load balancers, or if your cluster does not use Kuryr, create an Octavia load balancer and then configure your cluster to use it.

.Prerequisites

@@ -80,4 +79,4 @@ If Kuryr uses the Octavia Amphora driver, all traffic is routed through a singl
You can repeat this procedure to create additional load balancers, which can alleviate the bottleneck.

-====
\ No newline at end of file
+====