From 5b6f64cb13007e3e41c75cdf15377b2041a76ca8 Mon Sep 17 00:00:00 2001 From: scaleway-bot Date: Mon, 13 Mar 2023 17:03:41 +0000 Subject: [PATCH] feat: update generated APIs --- ...-all-usage-k8s-cluster-create-usage.golden | 52 +- ...-all-usage-k8s-cluster-delete-usage.golden | 6 +- ...est-all-usage-k8s-cluster-get-usage.golden | 4 +- ...uster-list-available-versions-usage.golden | 6 +- ...st-all-usage-k8s-cluster-list-usage.golden | 16 +- ...k8s-cluster-reset-admin-token-usage.golden | 6 +- ...-all-usage-k8s-cluster-update-usage.golden | 24 +- ...all-usage-k8s-cluster-upgrade-usage.golden | 12 +- .../test-all-usage-k8s-cluster-usage.golden | 4 +- ...est-all-usage-k8s-node-delete-usage.golden | 10 +- .../test-all-usage-k8s-node-get-usage.golden | 6 +- .../test-all-usage-k8s-node-list-usage.golden | 18 +- ...est-all-usage-k8s-node-reboot-usage.golden | 6 +- ...st-all-usage-k8s-node-replace-usage.golden | 6 +- .../test-all-usage-k8s-node-usage.golden | 4 +- ...est-all-usage-k8s-pool-create-usage.golden | 38 +- ...est-all-usage-k8s-pool-delete-usage.golden | 6 +- .../test-all-usage-k8s-pool-get-usage.golden | 4 +- .../test-all-usage-k8s-pool-list-usage.golden | 18 +- ...est-all-usage-k8s-pool-update-usage.golden | 18 +- ...st-all-usage-k8s-pool-upgrade-usage.golden | 8 +- .../test-all-usage-k8s-pool-usage.golden | 4 +- ...est-all-usage-k8s-version-get-usage.golden | 4 +- ...st-all-usage-k8s-version-list-usage.golden | 2 +- .../test-all-usage-k8s-version-usage.golden | 6 +- ...-usage-lb-backend-add-servers-usage.golden | 4 +- ...t-all-usage-lb-backend-create-usage.golden | 62 +- ...t-all-usage-lb-backend-delete-usage.golden | 2 +- ...test-all-usage-lb-backend-get-usage.golden | 2 +- ...est-all-usage-lb-backend-list-usage.golden | 8 +- ...age-lb-backend-remove-servers-usage.golden | 4 +- ...-usage-lb-backend-set-servers-usage.golden | 4 +- ...lb-backend-update-healthcheck-usage.golden | 26 +- ...t-all-usage-lb-backend-update-usage.golden | 34 +- 
.../test-all-usage-lb-backend-usage.golden | 16 +- ...l-usage-lb-certificate-create-usage.golden | 12 +- ...l-usage-lb-certificate-delete-usage.golden | 2 +- ...-all-usage-lb-certificate-get-usage.golden | 2 +- ...all-usage-lb-certificate-list-usage.golden | 8 +- ...l-usage-lb-certificate-update-usage.golden | 2 +- ...test-all-usage-lb-certificate-usage.golden | 10 +- ...-all-usage-lb-frontend-create-usage.golden | 18 +- ...-all-usage-lb-frontend-delete-usage.golden | 4 +- ...est-all-usage-lb-frontend-get-usage.golden | 2 +- ...st-all-usage-lb-frontend-list-usage.golden | 8 +- ...-all-usage-lb-frontend-update-usage.golden | 16 +- .../test-all-usage-lb-frontend-usage.golden | 4 +- ...age-lb-private-network-attach-usage.golden | 6 +- ...age-lb-private-network-detach-usage.golden | 2 +- ...usage-lb-private-network-list-usage.golden | 6 +- ...-all-usage-lb-private-network-usage.golden | 6 +- ...est-all-usage-lb-route-create-usage.golden | 8 +- ...est-all-usage-lb-route-delete-usage.golden | 4 +- .../test-all-usage-lb-route-get-usage.golden | 4 +- .../test-all-usage-lb-route-list-usage.golden | 4 +- ...est-all-usage-lb-route-update-usage.golden | 8 +- .../test-all-usage-lb-route-usage.golden | 10 +- .../testdata/test-all-usage-lb-usage.golden | 2 +- .../test-all-usage-lbacl-create-usage.golden | 24 +- .../test-all-usage-lbacl-delete-usage.golden | 4 +- .../test-all-usage-lbacl-get-usage.golden | 4 +- .../test-all-usage-lbacl-list-usage.golden | 8 +- .../test-all-usage-lbacl-set-usage.golden | 24 +- .../test-all-usage-lbacl-update-usage.golden | 24 +- .../test-all-usage-lbacl-usage.golden | 4 +- .../test-all-usage-lbip-create-usage.golden | 4 +- .../test-all-usage-lbip-delete-usage.golden | 2 +- .../test-all-usage-lbip-get-usage.golden | 2 +- .../test-all-usage-lbip-list-usage.golden | 8 +- .../test-all-usage-lbip-update-usage.golden | 4 +- .../testdata/test-all-usage-lbip-usage.golden | 10 +- .../test-all-usage-lblb-create-usage.golden | 14 +- 
.../test-all-usage-lblb-delete-usage.golden | 6 +- ...test-all-usage-lblb-get-stats-usage.golden | 4 +- .../test-all-usage-lblb-get-usage.golden | 4 +- .../test-all-usage-lblb-list-usage.golden | 10 +- .../test-all-usage-lblb-migrate-usage.golden | 6 +- ...est-all-usage-lblb-types-list-usage.golden | 2 +- .../test-all-usage-lblb-types-usage.golden | 2 +- .../test-all-usage-lblb-update-usage.golden | 12 +- .../testdata/test-all-usage-lblb-usage.golden | 14 +- cmd/scw/testdata/test-main-usage-usage.golden | 2 +- docs/commands/k8s.md | 292 +++++---- docs/commands/lb.md | 582 +++++++++--------- internal/namespaces/k8s/v1/k8s_cli.go | 288 +++++---- 85 files changed, 967 insertions(+), 991 deletions(-) diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-create-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-create-usage.golden index 0ce1a5ee0f..9d87508264 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-create-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to create a new Kubernetes cluster on an account. +Creates a new Kubernetes cluster on a Scaleway account. USAGE: scw k8s cluster create [arg=value ...] @@ -14,28 +14,28 @@ EXAMPLES: ARGS: [project-id] Project ID to use. 
If none is passed the default project ID will be used - [type] The type of the cluster - name= The name of the cluster - [description] The description of the cluster - [tags.{index}] The tags associated with the cluster - version=latest The Kubernetes version of the cluster - cni=cilium The Container Network Interface (CNI) plugin that will run in the cluster (unknown_cni | cilium | calico | weave | flannel | kilo) - pools.{index}.name The name of the pool - pools.{index}.node-type The node type is the type of Scaleway Instance wanted for the pool - [pools.{index}.placement-group-id] The placement group ID in which all the nodes of the pool will be created - [pools.{index}.autoscaling] The enablement of the autoscaling feature for the pool - pools.{index}.size The size (number of nodes) of the pool - [pools.{index}.min-size] The minimum size of the pool - [pools.{index}.max-size] The maximum size of the pool - [pools.{index}.container-runtime] The container runtime for the nodes of the pool (unknown_runtime | docker | containerd | crio) - [pools.{index}.autohealing] The enablement of the autohealing feature for the pool - [pools.{index}.tags.{index}] The tags associated with the pool - [pools.{index}.kubelet-args.{key}] The Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental + [type] Type of the cluster + name= Name of the cluster + [description] Description of the cluster + [tags.{index}] Tags associated with the cluster + version=latest Kubernetes version of the cluster + cni=cilium Container Network Interface (CNI) plugin that will run in the cluster (unknown_cni | cilium | calico | weave | flannel | kilo) + pools.{index}.name Name of the pool + pools.{index}.node-type Node type is the type of Scaleway Instance wanted for the pool + [pools.{index}.placement-group-id] Placement group ID in which all the nodes of the pool will be created + [pools.{index}.autoscaling] Defines whether the autoscaling feature is enabled for the pool + pools.{index}.size Size (number of nodes) of the pool + [pools.{index}.min-size] Minimum size of the pool + [pools.{index}.max-size] Maximum size of the pool + [pools.{index}.container-runtime] Container runtime for the nodes of the pool (unknown_runtime | docker | containerd | crio) + [pools.{index}.autohealing] Defines whether the autohealing feature is enabled for the pool + [pools.{index}.tags.{index}] Tags associated with the pool + [pools.{index}.kubelet-args.{key}] Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental [pools.{index}.upgrade-policy.max-unavailable] The maximum number of nodes that can be not ready at the same time [pools.{index}.upgrade-policy.max-surge] The maximum number of nodes to be created during the upgrade - [pools.{index}.zone] The Zone in which the Pool's node will be spawn in - [pools.{index}.root-volume-type] The system volume disk type (default_volume_type | l_ssd | b_ssd) - [pools.{index}.root-volume-size] The system volume disk size + [pools.{index}.zone] Zone in which the pool's nodes will be spawned + [pools.{index}.root-volume-type] System volume disk type (default_volume_type | l_ssd | b_ssd) + [pools.{index}.root-volume-size] System volume disk size [autoscaler-config.scale-down-disabled] Disable the cluster autoscaler [autoscaler-config.scale-down-delay-after-add] How long after scale up that scale down evaluation resumes [autoscaler-config.estimator] Type of resource estimator to be used in scale up (unknown_estimator | binpacking) @@ -47,8 +47,8 @@ ARGS: [autoscaler-config.scale-down-utilization-threshold] Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down [autoscaler-config.max-graceful-termination-sec] Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node [auto-upgrade.enable] Whether or not auto upgrade is enabled for the cluster - [auto-upgrade.maintenance-window.start-hour] The start hour of the 2-hour maintenance window - [auto-upgrade.maintenance-window.day] The day of the week for the maintenance window (any | monday | tuesday | wednesday | thursday | friday | saturday | sunday) + [auto-upgrade.maintenance-window.start-hour] Start time of the two-hour maintenance window + [auto-upgrade.maintenance-window.day] Day of the week for the maintenance window (any | monday | tuesday | wednesday | thursday | friday | saturday | sunday) 
[feature-gates.{index}] List of feature gates to enable [admission-plugins.{index}] List of admission plugins to enable [open-id-connect-config.issuer-url] URL of the provider which allows the API server to discover public signing keys @@ -57,14 +57,14 @@ ARGS: [open-id-connect-config.username-prefix] Prefix prepended to username [open-id-connect-config.groups-claim.{index}] JWT claim to use as the user's group [open-id-connect-config.groups-prefix] Prefix prepended to group claims - [open-id-connect-config.required-claim.{index}] Multiple key=value pairs that describes a required claim in the ID Token + [open-id-connect-config.required-claim.{index}] Multiple key=value pairs that describes a required claim in the ID token [apiserver-cert-sans.{index}] Additional Subject Alternative Names for the Kubernetes API server certificate [organization-id] Organization ID to use. If none is passed the default organization ID will be used [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) DEPRECATED ARGS: - [enable-dashboard] The enablement of the Kubernetes Dashboard in the cluster - [ingress] The Ingress Controller that will run in the cluster (unknown_ingress | none | nginx | traefik | traefik2) + [enable-dashboard] Defines if the Kubernetes Dashboard is enabled in the cluster + [ingress] Ingress Controller that will run in the cluster (unknown_ingress | none | nginx | traefik | traefik2) FLAGS: -h, --help help for create diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-delete-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-delete-usage.golden index 2eb318f139..bf8d2269d6 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-delete-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to delete a specific cluster and all its associated pools and nodes. 
Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster. +Deletes a specific cluster and all its associated pools and nodes. Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster. USAGE: scw k8s cluster delete [arg=value ...] EXAMPLES: - Delete a given cluster + Delete a cluster scw k8s cluster delete 11111111-1111-1111-111111111111 ARGS: - cluster-id The ID of the cluster to delete + cluster-id ID of the cluster to delete [with-additional-resources] Set true if you want to delete all volumes (including retain volume type) and loadbalancers whose name start with cluster ID [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-get-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-get-usage.golden index 1f5a8447e6..e43e4054e6 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-get-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to get details about a specific Kubernetes cluster. +Get details about a specific Kubernetes cluster. USAGE: scw k8s cluster get [arg=value ...] 
EXAMPLES: - Get a given cluster + Get a cluster information scw k8s cluster get 11111111-1111-1111-111111111111 ARGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-list-available-versions-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-list-available-versions-usage.golden index b7af2cedfb..18e29eadd3 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-list-available-versions-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-list-available-versions-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to list the versions that a specific Kubernetes cluster is allowed to upgrade to. Note that it will be every patch version greater than the actual one as well a one minor version ahead of the actual one. Upgrades skipping a minor version will not work. +List the versions that a specific Kubernetes cluster is allowed to upgrade to. Results will comprise every patch version greater than the current patch, as well as one minor version ahead of the current version. Any upgrade skipping a minor version will not work. USAGE: scw k8s cluster list-available-versions [arg=value ...] EXAMPLES: - List all available versions for a given cluster to upgrade to + List all available versions for a cluster to upgrade to scw k8s cluster list-available-versions 11111111-1111-1111-111111111111 ARGS: - cluster-id The ID of the cluster which the available Kuberentes versions will be listed from + cluster-id ID of the cluster which the available Kubernetes versions will be listed from [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-list-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-list-usage.golden index 5c9e227798..ea8b52de8d 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-list-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to list all the existing Kubernetes clusters in an account. +List all the existing Kubernetes clusters in a specific Region. USAGE: scw k8s cluster list [arg=value ...] EXAMPLES: - List all the clusters on your default region + List all clusters on your default region scw k8s cluster list List the ready clusters on your default region @@ -16,12 +16,12 @@ EXAMPLES: scw k8s cluster list region=fr-par name=cluster1 ARGS: - [project-id] The project ID on which to filter the returned clusters - [order-by] The sort order of the returned clusters (created_at_asc | created_at_desc | updated_at_asc | updated_at_desc | name_asc | name_desc | status_asc | status_desc | version_asc | version_desc) - [name] The name on which to filter the returned clusters - [status] The status on which to filter the returned clusters (unknown | creating | ready | deleting | deleted | updating | locked | pool_required) - [type] The type on which to filter the returned clusters - [organization-id] The organization ID on which to filter the returned clusters + [project-id] Project ID on which to filter the returned clusters + [order-by] Sort order of the returned clusters (created_at_asc | created_at_desc | updated_at_asc | updated_at_desc | name_asc | name_desc | status_asc | status_desc | version_asc | version_desc) + [name] Name on which to filter the returned clusters + [status] Status on which to filter the returned clusters (unknown | creating | ready | deleting | deleted | updating | locked | 
pool_required) + [type] Type on which to filter the returned clusters + [organization-id] Organization ID on which to filter the returned clusters [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-reset-admin-token-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-reset-admin-token-usage.golden index dd1bf8963d..8bdd22bff0 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-reset-admin-token-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-reset-admin-token-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable after) and create a new one. Note that the redownload of the kubeconfig will be necessary to keep interacting with the cluster (if the old admin token was used). +Reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable afterwards) and create a new one. Note that you will need to redownload kubeconfig in order to keep interacting with the cluster. USAGE: scw k8s cluster reset-admin-token [arg=value ...] EXAMPLES: - Reset the admin token for a given cluster + Reset the admin token for a cluster scw k8s cluster reset-admin-token 11111111-1111-1111-111111111111 ARGS: - cluster-id The ID of the cluster of which the admin token will be renewed + cluster-id ID of the cluster on which the admin token will be renewed [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-update-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-update-usage.golden index dd653291e6..be27dc10b2 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-update-usage.golden @@ -1,22 +1,22 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to update a specific Kubernetes cluster. Note that this method is not made to upgrade a Kubernetes cluster. +Update a specific Kubernetes cluster. Note that this method is designed to update details such as name, description, tags and configuration. However, you cannot upgrade a cluster with this method. To do so, use the dedicated endpoint. USAGE: scw k8s cluster update [arg=value ...] EXAMPLES: - Enable dashboard on a given cluster + Enable dashboard on a cluster scw k8s cluster update 11111111-1111-1111-111111111111 enable-dashboard=true - Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a given cluster + Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a cluster scw k8s cluster update 11111111-1111-1111-111111111111 feature-gates.0=TTLAfterFinished feature-gates.1=ServiceNodeExclusion ARGS: - cluster-id The ID of the cluster to update - [name] The new name of the cluster - [description] The new description of the cluster - [tags.{index}] The new tags associated with the cluster + cluster-id ID of the cluster to update + [name] New external name of the cluster + [description] New description of the cluster + [tags.{index}] New tags associated with the cluster [autoscaler-config.scale-down-disabled] Disable the cluster autoscaler [autoscaler-config.scale-down-delay-after-add] How long after scale up that scale down evaluation resumes [autoscaler-config.estimator] Type of resource estimator to be used in scale up (unknown_estimator | 
binpacking) @@ -28,8 +28,8 @@ ARGS: [autoscaler-config.scale-down-utilization-threshold] Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down [autoscaler-config.max-graceful-termination-sec] Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node [auto-upgrade.enable] Whether or not auto upgrade is enabled for the cluster - [auto-upgrade.maintenance-window.start-hour] The start hour of the 2-hour maintenance window - [auto-upgrade.maintenance-window.day] The day of the week for the maintenance window (any | monday | tuesday | wednesday | thursday | friday | saturday | sunday) + [auto-upgrade.maintenance-window.start-hour] Start time of the two-hour maintenance window + [auto-upgrade.maintenance-window.day] Day of the week for the maintenance window (any | monday | tuesday | wednesday | thursday | friday | saturday | sunday) [feature-gates.{index}] List of feature gates to enable [admission-plugins.{index}] List of admission plugins to enable [open-id-connect-config.issuer-url] URL of the provider which allows the API server to discover public signing keys @@ -38,13 +38,13 @@ ARGS: [open-id-connect-config.username-prefix] Prefix prepended to username [open-id-connect-config.groups-claim.{index}] JWT claim to use as the user's group [open-id-connect-config.groups-prefix] Prefix prepended to group claims - [open-id-connect-config.required-claim.{index}] Multiple key=value pairs that describes a required claim in the ID Token + [open-id-connect-config.required-claim.{index}] Multiple key=value pairs that describes a required claim in the ID token [apiserver-cert-sans.{index}] Additional Subject Alternative Names for the Kubernetes API server certificate [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) DEPRECATED ARGS: - [enable-dashboard] The new value of the Kubernetes Dashboard enablement - [ingress] The new Ingress Controller for the cluster (unknown_ingress | none | nginx | traefik | traefik2) + [enable-dashboard] New value of the Kubernetes Dashboard enablement + [ingress] New Ingress Controller for the cluster (unknown_ingress | none | nginx | traefik | traefik2) FLAGS: -h, --help help for update diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-upgrade-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-upgrade-usage.golden index db0b8f8e3f..b5e301a9fa 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-upgrade-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-upgrade-usage.golden @@ -1,21 +1,21 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to upgrade a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version. +Upgrades a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version. USAGE: scw k8s cluster upgrade [arg=value ...] 
EXAMPLES: - Upgrade a given cluster to Kubernetes version 1.24.7 (without upgrading the pools) + Upgrade a cluster to Kubernetes version 1.24.7 (without upgrading the pools) scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7 - Upgrade a given cluster to Kubernetes version 1.24.7 (and upgrade the pools) + Upgrade a cluster to Kubernetes version 1.24.7 (and upgrade the pools) scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7 upgrade-pools=true ARGS: - cluster-id The ID of the cluster to upgrade - version The new Kubernetes version of the cluster - [upgrade-pools] The enablement of the pools upgrade + cluster-id ID of the cluster to upgrade + version New Kubernetes version of the cluster + [upgrade-pools] Enablement of the pools upgrade [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-cluster-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-cluster-usage.golden index 077a8c6fad..b937ddcecf 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-cluster-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-cluster-usage.golden @@ -10,8 +10,8 @@ USAGE: AVAILABLE COMMANDS: create Create a new cluster delete Delete a cluster - get Get a cluster - list List all the clusters + get Get specific cluster information + list List all clusters list-available-versions List available versions for a cluster reset-admin-token Reset the admin token of a cluster update Update a cluster diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-delete-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-delete-usage.golden index ccb382c842..70997ed4e4 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-delete-usage.golden @@ -1,22 +1,22 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to delete a specific node. 
Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Delete a specific node. Note that when there is not enough space to reschedule all the pods (in a one-node cluster for instance), you may experience some disruption of your applications. USAGE: scw k8s node delete [arg=value ...] EXAMPLES: - Delete a given node + Delete a node scw k8s node delete 11111111-1111-1111-111111111111 - Delete a given node without evicting workloads + Delete a node without evicting workloads scw k8s node delete 11111111-1111-1111-111111111111 skip-drain=true - Replace a given node by a new one + Replace a node by a new one scw k8s node delete 11111111-1111-1111-111111111111 replace=true ARGS: - node-id The ID of the node to replace + node-id ID of the node to replace [skip-drain] Skip draining node from its workload [replace] Add a new node after the deletion of this node [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-get-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-get-usage.golden index 9576b867b7..914ce3476e 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-get-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to get details about a specific Kubernetes node. +Get details about a specific Kubernetes node. USAGE: scw k8s node get [arg=value ...] EXAMPLES: - Get a given node + Get a node scw k8s node get 11111111-1111-1111-111111111111 ARGS: - node-id The ID of the requested node + node-id ID of the requested node [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-list-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-list-usage.golden index 297e17d20a..676ac40462 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-list-usage.golden @@ -1,26 +1,26 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to list all the existing nodes for a specific Kubernetes cluster. +List all the existing nodes for a specific Kubernetes cluster. USAGE: scw k8s node list [arg=value ...] EXAMPLES: - List all the nodes in the given cluster + List all the nodes in the cluster scw k8s node list cluster-id=11111111-1111-1111-111111111111 - List all the nodes in the pool 2222222222222-2222-222222222222 in the given cluster + List all the nodes in the pool 2222222222222-2222-222222222222 in the cluster scw k8s node list cluster-id=11111111-1111-1111-111111111111 pool-id=2222222222222-2222-222222222222 - List all ready nodes in the given cluster + List all ready nodes in the cluster scw k8s node list cluster-id=11111111-1111-1111-111111111111 status=ready ARGS: - cluster-id The cluster ID from which the nodes will be listed from - [pool-id] The pool ID on which to filter the returned nodes - [order-by] The sort order of the returned nodes (created_at_asc | created_at_desc) - [name] The name on which to filter the returned nodes - [status] The status on which to filter the returned nodes (unknown | creating | not_ready | ready | deleting | deleted | locked | rebooting | creation_error | upgrading | starting | registering) + cluster-id Cluster ID from which the nodes will be listed from + [pool-id] Pool ID on which to filter the returned nodes + [order-by] Sort order of the returned nodes (created_at_asc | created_at_desc) + [name] Name on which to filter the returned nodes + [status] Status on which to filter 
the returned nodes (unknown | creating | not_ready | ready | deleting | deleted | locked | rebooting | creation_error | upgrading | starting | registering) [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-reboot-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-reboot-usage.golden index 628120a685..fd24b2881c 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-reboot-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-reboot-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to reboot a specific node. This node will frist be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Reboot a specific node. This node will first be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. Note that when there is not enough space to reschedule all the pods (in a one-node cluster, for instance), you may experience some disruption of your applications. USAGE: scw k8s node reboot [arg=value ...] EXAMPLES: - Reboot a given node + Reboot a node scw k8s node reboot 11111111-1111-1111-111111111111 ARGS: - node-id The ID of the node to reboot + node-id ID of the node to reboot [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: -h, --help help for reboot diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-replace-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-replace-usage.golden index 51893d33cf..41762effe0 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-replace-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-replace-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one-node cluster, for instance), you may experience some disruption of your applications. USAGE: scw k8s node replace [arg=value ...] EXAMPLES: - Replace a given node + Replace a node scw k8s node replace 11111111-1111-1111-111111111111 ARGS: - node-id The ID of the node to replace + node-id ID of the node to replace [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-node-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-node-usage.golden index d10f66e4e7..07696542f5 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-node-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-node-usage.golden @@ -1,8 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -A node (short for worker node) is an abstraction for a Scaleway Instance. -It is part of a pool and is instantiated by Scaleway, making Kubernetes software installed and configured automatically on it. -Please note that Kubernetes nodes cannot be accessed with ssh. +A node (short for worker node) is an abstraction for a Scaleway Instance. A node is always part of a pool. Each of them will have Kubernetes software automatically installed and configured by Scaleway. Please note that Kubernetes nodes cannot be accessed with SSH. USAGE: scw k8s node diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-create-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-create-usage.golden index 28f0145fb6..435ceda6d5 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-create-usage.golden @@ -1,38 +1,38 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to create a new pool in a specific Kubernetes cluster. +Create a new pool in a specific Kubernetes cluster. USAGE: scw k8s pool create [arg=value ...] 
EXAMPLES: - Create a pool named bar with 2 DEV1-XL on a given cluster + Create a pool named bar with 2 DEV1-XL on a cluster scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=bar node-type=DEV1-XL size=2 - Create a pool named fish with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a given cluster + Create a pool named 'fish' with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a cluster scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=fish node-type=GP1-L size=5 min-size=0 max-size=10 autoscaling=true autohealing=true container-runtime=containerd - Create a tagged pool named turtle with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the pool on a given cluster + Create a tagged pool named 'turtle' with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the pool on a cluster scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=turtle node-type=GP1-S size=1 placement-group-id=2222222222222-2222-222222222222 tags.0=turtle tags.1=placement-group ARGS: - cluster-id The ID of the cluster in which the pool will be created - name= The name of the pool - node-type=DEV1-M The node type is the type of Scaleway Instance wanted for the pool - [placement-group-id] The placement group ID in which all the nodes of the pool will be created - [autoscaling] The enablement of the autoscaling feature for the pool - size=1 The size (number of nodes) of the pool - [min-size] The minimum size of the pool - [max-size] The maximum size of the pool - [container-runtime] The container runtime for the nodes of the pool (unknown_runtime | docker | containerd | crio) - [autohealing] The enablement of the autohealing feature for the pool - [tags.{index}] The tags associated with 
the pool - [kubelet-args.{key}] The Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental + cluster-id ID of the cluster in which the pool will be created + name= Name of the pool + node-type=DEV1-M Node type is the type of Scaleway Instance wanted for the pool + [placement-group-id] Placement group ID in which all the nodes of the pool will be created + [autoscaling] Defines whether the autoscaling feature is enabled for the pool + size=1 Size (number of nodes) of the pool + [min-size] Minimum size of the pool + [max-size] Maximum size of the pool + [container-runtime] Container runtime for the nodes of the pool (unknown_runtime | docker | containerd | crio) + [autohealing] Defines whether the autohealing feature is enabled for the pool + [tags.{index}] Tags associated with the pool + [kubelet-args.{key}] Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental [upgrade-policy.max-unavailable] [upgrade-policy.max-surge] - [zone] The Zone in which the Pool's node will be spawn in - [root-volume-type] The system volume disk type (default_volume_type | l_ssd | b_ssd) - [root-volume-size] The system volume disk size + [zone] Zone in which the pool's nodes will be spawned + [root-volume-type] System volume disk type (default_volume_type | l_ssd | b_ssd) + [root-volume-size] System volume disk size [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-delete-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-delete-usage.golden index 3f84d7c3f3..ab0aa557ae 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-delete-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to delete a specific pool from a cluster, deleting all the nodes associated with it. +Delete a specific pool from a cluster. All of the pool's nodes will also be deleted. USAGE: scw k8s pool delete [arg=value ...] EXAMPLES: - Delete a given pool + Delete a specific pool scw k8s pool delete 11111111-1111-1111-111111111111 ARGS: - pool-id The ID of the pool to delete + pool-id ID of the pool to delete [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-get-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-get-usage.golden index 6d65f617b3..cbf1eb8068 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-get-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to get details about a specific pool. +Get details about a specific pool in a Kubernetes cluster. USAGE: scw k8s pool get [arg=value ...] @@ -10,7 +10,7 @@ EXAMPLES: scw k8s pool get 11111111-1111-1111-111111111111 ARGS: - pool-id The ID of the requested pool + pool-id ID of the requested pool [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-list-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-list-usage.golden index a3f674b0a0..d5161b277d 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-list-usage.golden @@ -1,28 +1,28 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to list all the existing pools for a specific Kubernetes cluster. +List all the existing pools for a specific Kubernetes cluster. USAGE: scw k8s pool list [arg=value ...] EXAMPLES: - List all pools for a given cluster + List all pools for a cluster scw k8s pool list cluster-id=11111111-1111-1111-111111111111 - List all scaling pools for a given cluster + List all scaling pools for a cluster scw k8s pool list cluster-id=11111111-1111-1111-111111111111 status=scaling - List all pools for a given cluster that contain the word foo in the pool name + List all pools for a cluster that contain the word 'foo' in the pool name scw k8s pool list cluster-id=11111111-1111-1111-111111111111 name=foo - List all pools for a given cluster and order them by ascending creation date + List all pools for a cluster and order them by ascending creation date scw k8s pool list cluster-id=11111111-1111-1111-111111111111 order-by=created_at_asc ARGS: - cluster-id The ID of the cluster from which the pools will be listed from - [order-by] The sort order of the returned pools (created_at_asc | created_at_desc | updated_at_asc | updated_at_desc | name_asc | name_desc | status_asc | status_desc | version_asc | version_desc) - [name] The name on which to filter the returned pools - [status] The status on which to filter the returned pools (unknown | ready | deleting | deleted | scaling | warning | locked | upgrading) + cluster-id ID of the cluster from which the pools will be listed from + [order-by] Sort order of the 
returned pools (created_at_asc | created_at_desc | updated_at_asc | updated_at_desc | name_asc | name_desc | status_asc | status_desc | version_asc | version_desc) + [name] Name on which to filter the returned pools + [status] Status on which to filter the returned pools (unknown | ready | deleting | deleted | scaling | warning | locked | upgrading) [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-update-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-update-usage.golden index 8c2c5025f1..a790f10739 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-update-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to update some attributes of a specific pool such as the size, the autoscaling enablement, the tags, ... +Update attributes of a specific pool, such as size, autoscaling settings, and tags. USAGE: scw k8s pool update [arg=value ...] @@ -16,14 +16,14 @@ EXAMPLES: scw k8s pool update 11111111-1111-1111-111111111111 tags.0=my tags.1=new tags.2=pool ARGS: - pool-id The ID of the pool to update - [autoscaling] The new value for the enablement of autoscaling for the pool - [size] The new size for the pool - [min-size] The new minimun size for the pool - [max-size] The new maximum size for the pool - [autohealing] The new value for the enablement of autohealing for the pool - [tags.{index}] The new tags associated with the pool - [kubelet-args.{key}] The new Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental + pool-id ID of the pool to update + [autoscaling] New value for the enablement of autoscaling for the pool + [size] New size for the pool + [min-size] New minimum size for the pool + [max-size] New maximum size for the pool + [autohealing] New value for the enablement of autohealing for the pool + [tags.{index}] New tags associated with the pool + [kubelet-args.{key}] New Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental [upgrade-policy.max-unavailable] [upgrade-policy.max-surge] [region=fr-par] Region to target. If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-upgrade-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-upgrade-usage.golden index dbce9fca21..4c129f4afa 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-upgrade-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-upgrade-usage.golden @@ -1,17 +1,17 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to upgrade the Kubernetes version of a specific pool. Note that this will work when the targeted version is the same than the version of the cluster. +Upgrade the Kubernetes version of a specific pool. Note that this will work when the targeted version is the same as the version of the cluster. USAGE: scw k8s pool upgrade [arg=value ...] EXAMPLES: - Upgrade a given pool to the Kubernetes version 1.24.7 + Upgrade a specific pool to the Kubernetes version 1.24.7 scw k8s pool upgrade 11111111-1111-1111-111111111111 version=1.24.7 ARGS: - pool-id The ID of the pool to upgrade - version The new Kubernetes version for the pool + pool-id ID of the pool to upgrade + version New Kubernetes version for the pool [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-pool-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-pool-usage.golden index 30282c8243..7b8b306a03 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-pool-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-pool-usage.golden @@ -1,8 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -A pool is a set of identical Nodes. A pool has a name, a size (its current number of nodes), nodes number limits (min, max) and a Scaleway instance type. -Changing those limits increases/decreases the size of a pool. Thus, when autoscaling is enabled, the pool will grow or shrink inside those limits, depending on its load. -A "default pool" is automatically created with every cluster. +A pool is a set of identical nodes. A pool has a name, a size (its current number of nodes), node number limits (min, max), and a Scaleway Instance type. Changing those limits increases/decreases the size of a pool. Thus, the pool will grow or shrink inside those limits when autoscaling is enabled, depending on its load. A "default pool" is automatically created with every cluster. USAGE: scw k8s pool diff --git a/cmd/scw/testdata/test-all-usage-k8s-version-get-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-version-get-usage.golden index f67e16b6c4..42c00bdeb9 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-version-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-version-get-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to get a specific Kubernetes version and the details about the version. +Get a specific Kubernetes version and the details about the version. USAGE: scw k8s version get [arg=value ...] @@ -10,7 +10,7 @@ EXAMPLES: scw k8s version get 1.24.7 ARGS: - version-name The requested version name + version-name Requested version name [region=fr-par] Region to target. 
If none is passed will use default region from the config (fr-par | nl-ams | pl-waw) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-k8s-version-list-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-version-list-usage.golden index 9a5e4aed6d..65f7045487 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-version-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-version-list-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This method allows to list all available versions for the creation of a new Kubernetes cluster. +List all available versions for the creation of a new Kubernetes cluster. USAGE: scw k8s version list [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-k8s-version-usage.golden b/cmd/scw/testdata/test-all-usage-k8s-version-usage.golden index ddff28fb75..7374a72399 100644 --- a/cmd/scw/testdata/test-all-usage-k8s-version-usage.golden +++ b/cmd/scw/testdata/test-all-usage-k8s-version-usage.golden @@ -1,10 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -A version is a vanilla Kubernetes version like `x.y.z`. -It is composed of a major version x, a minor version y and a patch version z. -Scaleway's managed Kubernetes, Kapsule, will at least support the last patch version for the last three minor release. - -Also each version have a different set of container runtimes, CNIs, ingresses, feature gates and admission plugins available. +A version is a vanilla Kubernetes version like `x.y.z`. It comprises a major version x, a minor version y, and a patch version z. Scaleway's managed Kubernetes, Kapsule, will support at minimum the last patch version for the last three minor releases. Also, each version has a different set of container runtimes, CNIs, ingresses, feature gates, and admission plugins available. 
USAGE: scw k8s version diff --git a/cmd/scw/testdata/test-all-usage-lb-backend-add-servers-usage.golden b/cmd/scw/testdata/test-all-usage-lb-backend-add-servers-usage.golden index 118c520dc4..0b69fa7b3c 100644 --- a/cmd/scw/testdata/test-all-usage-lb-backend-add-servers-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-backend-add-servers-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Add a set of servers in a given backend. +For a given backend specified by its backend ID, add a set of backend servers (identified by their IP addresses) it should forward traffic to. These will be appended to any existing set of backend servers for this backend. USAGE: scw lb backend add-servers [arg=value ...] @@ -12,7 +12,7 @@ ARGS: [use-instance-server-public-ip] Use public IP address of the instance instead of the private one [baremetal-server-id.{index}] UIID of the baremetal server. [baremetal-server-tag.{index}] Tag of the baremetal server. - server-ip.{index} Set all IPs to add on your backend + server-ip.{index} List of IP addresses to add to backend servers [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-backend-create-usage.golden b/cmd/scw/testdata/test-all-usage-lb-backend-create-usage.golden index 0a30d1f663..de2302f5d5 100644 --- a/cmd/scw/testdata/test-all-usage-lb-backend-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-backend-create-usage.golden @@ -1,52 +1,52 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Create a backend in a given load balancer. +Create a new backend for a given Load Balancer, specifying its full configuration including protocol, port and forwarding algorithm. USAGE: scw lb backend create [arg=value ...] ARGS: - name= Resource name - forward-protocol Backend protocol. 
TCP or HTTP (tcp | http) - forward-port User sessions will be forwarded to this port of backend servers - forward-port-algorithm=roundrobin Load balancing algorithm (roundrobin | leastconn | first) - sticky-sessions=none Enables cookie-based session persistence (none | cookie | table) - [sticky-sessions-cookie-name] Cookie name for sticky sessions + name= Name for the backend + forward-protocol Protocol to be used by the backend when forwarding traffic to backend servers (tcp | http) + forward-port Port to be used by the backend when forwarding traffic to backend servers + forward-port-algorithm=roundrobin Load balancing algorithm to be used when determining which backend server to forward new traffic to (roundrobin | leastconn | first) + sticky-sessions=none Defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie to stick a session to a backend server. 
Table-based uses the source (client) IP address to stick a session to a backend server (none | cookie | table) + [sticky-sessions-cookie-name] Cookie name for cookie-based sticky sessions [health-check.mysql-config.user] - [health-check.check-max-retries] Number of consecutive unsuccessful health checks, after which the server will be considered dead + [health-check.check-max-retries] Number of consecutive unsuccessful health checks after which the server will be considered dead [health-check.pgsql-config.user] - [health-check.http-config.uri] HTTP uri used with the request - [health-check.http-config.method] HTTP method used with the request - [health-check.http-config.code] HTTP response code so the Healthcheck is considered successfull - [health-check.http-config.host-header] HTTP host header used with the request - [health-check.https-config.uri] HTTP uri used with the request - [health-check.https-config.method] HTTP method used with the request - [health-check.https-config.code] HTTP response code so the Healthcheck is considered successfull - [health-check.https-config.host-header] HTTP host header used with the request - [health-check.https-config.sni] Specifies the SNI to use to do health checks over SSL - [health-check.port] TCP port to use for the backend server health check + [health-check.http-config.uri] HTTP URI used for the health check + [health-check.http-config.method] HTTP method used for the health check + [health-check.http-config.code] HTTP response code expected for a successful health check + [health-check.http-config.host-header] HTTP host header used for the health check + [health-check.https-config.uri] HTTP URI used for the health check + [health-check.https-config.method] HTTP method used for the health check + [health-check.https-config.code] HTTP response code expected for a successful health check + [health-check.https-config.host-header] HTTP host header used for the health check + [health-check.https-config.sni] SNI used for SSL 
health checks + [health-check.port] Port to use for the backend server health check [health-check.check-timeout] Maximum time a backend server has to reply to the health check - [health-check.check-delay] Time between two consecutive health checks - [health-check.check-send-proxy] It defines whether the health check should be done considering the proxy protocol - lb-id Load balancer ID + [health-check.check-delay] Time to wait between two consecutive health checks + [health-check.check-send-proxy] Defines whether proxy protocol should be activated for the health check + lb-id Load Balancer ID [instance-server-id.{index}] UIID of the instance server. [instance-server-tag.{index}] Tag of the instance server. [use-instance-server-public-ip] Use public IP address of the instance instead of the private one [baremetal-server-id.{index}] UIID of the baremetal server. [baremetal-server-tag.{index}] Tag of the baremetal server. - server-ip.{index} Backend server IP addresses list (IPv4 or IPv6) - [timeout-server] Maximum server connection inactivity time (allowed time the server has to process the request) - [timeout-connect] Maximum initial server connection establishment time - [timeout-tunnel] Maximum tunnel inactivity time after Websocket is established (take precedence over client and server timeout) - [on-marked-down-action] Modify what occurs when a backend server is marked down (on_marked_down_action_none | shutdown_sessions) - [proxy-protocol] PROXY protocol, forward client's address (must be supported by backend servers software) (proxy_protocol_unknown | proxy_protocol_none | proxy_protocol_v1 | proxy_protocol_v2 | proxy_protocol_v2_ssl | proxy_protocol_v2_ssl_cn) - [failover-host] Scaleway S3 bucket website to be served in case all backend servers are down - [ssl-bridging] Enable SSL between load balancer and backend servers - [ignore-ssl-server-verify] Set to true to ignore server certificate verification + server-ip.{index} List of backend server IP addresses 
(IPv4 or IPv6) the backend should forward traffic to + [timeout-server] Maximum allowed time for a backend server to process a request + [timeout-connect] Maximum allowed time for establishing a connection to a backend server + [timeout-tunnel] Maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout) + [on-marked-down-action] Action to take when a backend server is marked as down (on_marked_down_action_none | shutdown_sessions) + [proxy-protocol] PROXY protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. PROXY protocol must be supported by the backend servers' software (proxy_protocol_unknown | proxy_protocol_none | proxy_protocol_v1 | proxy_protocol_v2 | proxy_protocol_v2_ssl | proxy_protocol_v2_ssl_cn) + [failover-host] Scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. Do not include the scheme (eg https://) + [ssl-bridging] Defines whether to enable SSL between the Load Balancer and backend servers + [ignore-ssl-server-verify] Defines whether the server certificate verification should be ignored [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) DEPRECATED ARGS: - [send-proxy-v2] Deprecated in favor of proxy_protocol field ! 
+ [send-proxy-v2] Deprecated in favor of proxy_protocol field FLAGS: -h, --help help for create diff --git a/cmd/scw/testdata/test-all-usage-lb-backend-delete-usage.golden b/cmd/scw/testdata/test-all-usage-lb-backend-delete-usage.golden index d26236e0b6..c46fd64820 100644 --- a/cmd/scw/testdata/test-all-usage-lb-backend-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-backend-delete-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Delete a backend in a given load balancer. +Delete a backend of a given Load Balancer, specified by its backend ID. This action is irreversible and cannot be undone. USAGE: scw lb backend delete [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-lb-backend-get-usage.golden b/cmd/scw/testdata/test-all-usage-lb-backend-get-usage.golden index 76faa9f8bc..f25a83c57b 100644 --- a/cmd/scw/testdata/test-all-usage-lb-backend-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-backend-get-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Get a backend in a given load balancer. +Get the full details of a given backend, specified by its backend ID. The response contains the backend's full configuration parameters including protocol, port and forwarding algorithm. USAGE: scw lb backend get [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-lb-backend-list-usage.golden b/cmd/scw/testdata/test-all-usage-lb-backend-list-usage.golden index 4e455221b6..f4e83513d6 100644 --- a/cmd/scw/testdata/test-all-usage-lb-backend-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-backend-list-usage.golden @@ -1,14 +1,14 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -List backends in a given load balancer. +List all the backends of a Load Balancer, specified by its Load Balancer ID. By default, results are returned in ascending order by the creation date of each backend. 
The response is an array of backend objects, containing full details of each one including their configuration parameters such as protocol, port and forwarding algorithm. USAGE: scw lb backend list [arg=value ...] ARGS: - lb-id Load balancer ID - [name] Use this to search by name - [order-by] Response order (created_at_asc | created_at_desc | name_asc | name_desc) + lb-id Load Balancer ID + [name] Name of the backend to filter for + [order-by] Sort order of backends in the response (created_at_asc | created_at_desc | name_asc | name_desc) [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2 | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-backend-remove-servers-usage.golden b/cmd/scw/testdata/test-all-usage-lb-backend-remove-servers-usage.golden index 7aee807752..3dff58ec82 100644 --- a/cmd/scw/testdata/test-all-usage-lb-backend-remove-servers-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-backend-remove-servers-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Remove a set of servers for a given backend. +For a given backend specified by its backend ID, remove the specified backend servers (identified by their IP addresses) so that it no longer forwards traffic to them. USAGE: scw lb backend remove-servers [arg=value ...] @@ -12,7 +12,7 @@ ARGS: [use-instance-server-public-ip] Use public IP address of the instance instead of the private one [baremetal-server-id.{index}] UIID of the baremetal server. [baremetal-server-tag.{index}] Tag of the baremetal server. - server-ip.{index} Set all IPs to remove of your backend + server-ip.{index} List of IP addresses to remove from backend servers [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-backend-set-servers-usage.golden b/cmd/scw/testdata/test-all-usage-lb-backend-set-servers-usage.golden index 0e7c4d3d17..1587057c9c 100644 --- a/cmd/scw/testdata/test-all-usage-lb-backend-set-servers-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-backend-set-servers-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Define all servers in a given backend. +For a given backend specified by its backend ID, define the set of backend servers (identified by their IP addresses) that it should forward traffic to. Any existing backend servers configured for this backend will be removed. USAGE: scw lb backend set-servers [arg=value ...] @@ -12,7 +12,7 @@ ARGS: [use-instance-server-public-ip] Use public IP address of the instance instead of the private one [baremetal-server-id.{index}] UIID of the baremetal server. [baremetal-server-tag.{index}] Tag of the baremetal server. - server-ip.{index} Set all IPs to add on your backend and remove all other + server-ip.{index} List of IP addresses for backend servers. Any other existing backend servers will be removed [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-backend-update-healthcheck-usage.golden b/cmd/scw/testdata/test-all-usage-lb-backend-update-healthcheck-usage.golden index 195467560d..6e78d33207 100644 --- a/cmd/scw/testdata/test-all-usage-lb-backend-update-healthcheck-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-backend-update-healthcheck-usage.golden @@ -1,28 +1,28 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Update an healthcheck for a given backend. 
+Update the configuration of the health check performed by a given backend to verify the health of its backend servers, identified by its backend ID. Note that the request type is PUT and not PATCH. You must set all parameters. USAGE: scw lb backend update-healthcheck [arg=value ...] ARGS: - port Specify the port used to health check - check-delay Time between two consecutive health checks + port Port to use for the backend server health check + check-delay Time to wait between two consecutive health checks check-timeout Maximum time a backend server has to reply to the health check check-max-retries Number of consecutive unsuccessful health checks, after which the server will be considered dead backend-id Backend ID [mysql-config.user] [pgsql-config.user] - [http-config.uri] HTTP uri used with the request - [http-config.method] HTTP method used with the request - [http-config.code] HTTP response code so the Healthcheck is considered successfull - [http-config.host-header] HTTP host header used with the request - [https-config.uri] HTTP uri used with the request - [https-config.method] HTTP method used with the request - [https-config.code] HTTP response code so the Healthcheck is considered successfull - [https-config.host-header] HTTP host header used with the request - [https-config.sni] Specifies the SNI to use to do health checks over SSL - [check-send-proxy] It defines whether the health check should be done considering the proxy protocol + [http-config.uri] HTTP URI used for the health check + [http-config.method] HTTP method used for the health check + [http-config.code] HTTP response code expected for a successful health check + [http-config.host-header] HTTP host header used for the health check + [https-config.uri] HTTP URI used for the health check + [https-config.method] HTTP method used for the health check + [https-config.code] HTTP response code expected for a successful health check + [https-config.host-header] HTTP host header used for the health 
check + [https-config.sni] SNI used for SSL health checks + [check-send-proxy] Defines whether proxy protocol should be activated for the health check [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-backend-update-usage.golden b/cmd/scw/testdata/test-all-usage-lb-backend-update-usage.golden index 9ed8a841a2..26673ac76a 100644 --- a/cmd/scw/testdata/test-all-usage-lb-backend-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-backend-update-usage.golden @@ -1,30 +1,30 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Update a backend in a given load balancer. +Update a backend of a given Load Balancer, specified by its backend ID. Note that the request type is PUT and not PATCH. You must set all parameters. USAGE: scw lb backend update [arg=value ...] ARGS: - backend-id Backend ID to update - name Resource name - forward-protocol Backend protocol. 
TCP or HTTP (tcp | http) - forward-port User sessions will be forwarded to this port of backend servers - forward-port-algorithm Load balancing algorithm (roundrobin | leastconn | first) - sticky-sessions Enable cookie-based session persistence (none | cookie | table) - [sticky-sessions-cookie-name] Cookie name for sticky sessions - [timeout-server] Maximum server connection inactivity time (allowed time the server has to process the request) - [timeout-connect] Maximum initial server connection establishment time - [timeout-tunnel] Maximum tunnel inactivity time after Websocket is established (take precedence over client and server timeout) - [on-marked-down-action] Modify what occurs when a backend server is marked down (on_marked_down_action_none | shutdown_sessions) - [proxy-protocol] PROXY protocol, forward client's address (must be supported by backend servers software) (proxy_protocol_unknown | proxy_protocol_none | proxy_protocol_v1 | proxy_protocol_v2 | proxy_protocol_v2_ssl | proxy_protocol_v2_ssl_cn) - [failover-host] Scaleway S3 bucket website to be served in case all backend servers are down - [ssl-bridging] Enable SSL between load balancer and backend servers - [ignore-ssl-server-verify] Set to true to ignore server certificate verification + backend-id Backend ID + name Backend name + forward-protocol Protocol to be used by the backend when forwarding traffic to backend servers (tcp | http) + forward-port Port to be used by the backend when forwarding traffic to backend servers + forward-port-algorithm Load balancing algorithm to be used when determining which backend server to forward new traffic to (roundrobin | leastconn | first) + sticky-sessions Defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie to stick a session to a backend server. 
Table-based uses the source (client) IP address to stick a session to a backend server (none | cookie | table) + [sticky-sessions-cookie-name] Cookie name for cookie-based sticky sessions + [timeout-server] Maximum allowed time for a backend server to process a request + [timeout-connect] Maximum allowed time for establishing a connection to a backend server + [timeout-tunnel] Maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout) + [on-marked-down-action] Action to take when a backend server is marked down (on_marked_down_action_none | shutdown_sessions) + [proxy-protocol] PROXY protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. PROXY protocol must be supported by the backend servers' software (proxy_protocol_unknown | proxy_protocol_none | proxy_protocol_v1 | proxy_protocol_v2 | proxy_protocol_v2_ssl | proxy_protocol_v2_ssl_cn) + [failover-host] Scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. Do not include the scheme (e.g. https://) + [ssl-bridging] Defines whether to enable SSL bridging between the Load Balancer and backend servers + [ignore-ssl-server-verify] Defines whether the server certificate verification should be ignored [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) DEPRECATED ARGS: - [send-proxy-v2] Deprecated in favor of proxy_protocol field!
+ [send-proxy-v2] Deprecated in favor of proxy_protocol field FLAGS: -h, --help help for update diff --git a/cmd/scw/testdata/test-all-usage-lb-backend-usage.golden b/cmd/scw/testdata/test-all-usage-lb-backend-usage.golden index 2c22d665c9..7fc6108772 100644 --- a/cmd/scw/testdata/test-all-usage-lb-backend-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-backend-usage.golden @@ -6,15 +6,15 @@ USAGE: scw lb backend AVAILABLE COMMANDS: - add-servers Add a set of servers in a given backend - create Create a backend in a given load balancer - delete Delete a backend in a given load balancer - get Get a backend in a given load balancer - list List backends in a given load balancer + add-servers Add a set of backend servers to a given backend + create Create a backend for a given Load Balancer + delete Delete a backend of a given Load Balancer + get Get a backend of a given Load Balancer + list List the backends of a given Load Balancer remove-servers Remove a set of servers for a given backend - set-servers Define all servers in a given backend - update Update a backend in a given load balancer - update-healthcheck Update an healthcheck for a given backend + set-servers Define all backend servers for a given backend + update Update a backend of a given Load Balancer + update-healthcheck Update a health check for a given backend FLAGS: -h, --help help for backend diff --git a/cmd/scw/testdata/test-all-usage-lb-certificate-create-usage.golden b/cmd/scw/testdata/test-all-usage-lb-certificate-create-usage.golden index 5cc20d0428..754c33c3a8 100644 --- a/cmd/scw/testdata/test-all-usage-lb-certificate-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-certificate-create-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Generate a new TLS certificate using Let's Encrypt or import your certificate. +Generate a new SSL/TLS certificate for a given Load Balancer. 
You can choose to create a Let's Encrypt certificate, or import a custom certificate. USAGE: scw lb certificate create [arg=value ...] ARGS: - lb-id Load balancer ID - name= Certificate name - [letsencrypt-common-name] Main domain name of certificate (make sure this domain exists and resolves to your load balancer HA IP) - [letsencrypt-alternative-name.{index}] Alternative domain names (make sure all domain names exists and resolves to your load balancer HA IP) - [custom-certificate-chain] The full PEM-formatted include an entire certificate chain including public key, private key, and optionally certificate authorities. + lb-id Load Balancer ID + name= Name for the certificate + [letsencrypt-common-name] Main domain name of certificate (this domain must exist and resolve to your Load Balancer IP address) + [letsencrypt-alternative-name.{index}] Alternative domain names (all domain names must exist and resolve to your Load Balancer IP address) + [custom-certificate-chain] Full PEM-formatted certificate, consisting of the entire certificate chain including public key, private key, and (optionally) Certificate Authorities [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-certificate-delete-usage.golden b/cmd/scw/testdata/test-all-usage-lb-certificate-delete-usage.golden index a8f7a7ea65..5da0aea3b0 100644 --- a/cmd/scw/testdata/test-all-usage-lb-certificate-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-certificate-delete-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Delete a TLS certificate. +Delete an SSL/TLS certificate, specified by its certificate ID. Deleting a certificate is irreversible and cannot be undone. USAGE: scw lb certificate delete [arg=value ...] 
diff --git a/cmd/scw/testdata/test-all-usage-lb-certificate-get-usage.golden b/cmd/scw/testdata/test-all-usage-lb-certificate-get-usage.golden index 49997c749f..7801e0593a 100644 --- a/cmd/scw/testdata/test-all-usage-lb-certificate-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-certificate-get-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Get a TLS certificate. +Get information for a particular SSL/TLS certificate, specified by its certificate ID. The response returns full details of the certificate, including its type, main domain name, and alternative domain names. USAGE: scw lb certificate get [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-lb-certificate-list-usage.golden b/cmd/scw/testdata/test-all-usage-lb-certificate-list-usage.golden index b0f1916976..bc00018526 100644 --- a/cmd/scw/testdata/test-all-usage-lb-certificate-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-certificate-list-usage.golden @@ -1,14 +1,14 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -List all TLS certificates on a given load balancer. +List all the SSL/TLS certificates on a given Load Balancer. The response is an array of certificate objects, which are by default listed in ascending order of creation date. USAGE: scw lb certificate list [arg=value ...] ARGS: - lb-id Load balancer ID - [order-by] Response order (created_at_asc | created_at_desc | name_asc | name_desc) - [name] Use this to search by name + lb-id Load Balancer ID + [order-by] Sort order of certificates in the response (created_at_asc | created_at_desc | name_asc | name_desc) + [name] Certificate name to filter for, only certificates of this name will be returned [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2 | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-certificate-update-usage.golden b/cmd/scw/testdata/test-all-usage-lb-certificate-update-usage.golden index d3fa49e3eb..9208dc80b0 100644 --- a/cmd/scw/testdata/test-all-usage-lb-certificate-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-certificate-update-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Update a TLS certificate. +Update the name of a particular SSL/TLS certificate, specified by its certificate ID. USAGE: scw lb certificate update [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-lb-certificate-usage.golden b/cmd/scw/testdata/test-all-usage-lb-certificate-usage.golden index 043b48fbb9..a19f1a94cb 100644 --- a/cmd/scw/testdata/test-all-usage-lb-certificate-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-certificate-usage.golden @@ -6,11 +6,11 @@ USAGE: scw lb certificate AVAILABLE COMMANDS: - create Create a TLS certificate - delete Delete a TLS certificate - get Get a TLS certificate - list List all TLS certificates on a given load balancer - update Update a TLS certificate + create Create an SSL/TLS certificate + delete Delete an SSL/TLS certificate + get Get an SSL/TLS certificate + list List all SSL/TLS certificates on a given Load Balancer + update Update an SSL/TLS certificate FLAGS: -h, --help help for certificate diff --git a/cmd/scw/testdata/test-all-usage-lb-frontend-create-usage.golden b/cmd/scw/testdata/test-all-usage-lb-frontend-create-usage.golden index 0887c38afc..4b05014759 100644 --- a/cmd/scw/testdata/test-all-usage-lb-frontend-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-frontend-create-usage.golden @@ -1,22 +1,22 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Create a frontend in a given load balancer. 
+Create a new frontend for a given Load Balancer, specifying its configuration including the port it should listen on and the backend to attach it to. USAGE: scw lb frontend create [arg=value ...] ARGS: - name= Resource name - inbound-port TCP port to listen on the front side - lb-id Load balancer ID - backend-id Backend ID - [timeout-client] Set the maximum inactivity time on the client side - [certificate-ids.{index}] List of certificate IDs to bind on the frontend - [enable-http3] Activate HTTP 3 protocol (beta) + name= Name for the frontend + inbound-port Port the frontend should listen on + lb-id Load Balancer ID (ID of the Load Balancer to attach the frontend to) + backend-id Backend ID (ID of the backend the frontend should pass traffic to) + [timeout-client] Maximum allowed inactivity time on the client side + [certificate-ids.{index}] List of SSL/TLS certificate IDs to bind to the frontend + [enable-http3] Defines whether to enable HTTP/3 protocol on the frontend [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) DEPRECATED ARGS: - [certificate-id] Certificate ID, deprecated in favor of certificate_ids array ! + [certificate-id] Certificate ID, deprecated in favor of certificate_ids array FLAGS: -h, --help help for create diff --git a/cmd/scw/testdata/test-all-usage-lb-frontend-delete-usage.golden b/cmd/scw/testdata/test-all-usage-lb-frontend-delete-usage.golden index 18c84818a0..821aeaa589 100644 --- a/cmd/scw/testdata/test-all-usage-lb-frontend-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-frontend-delete-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Delete a frontend. +Delete a given frontend, specified by its frontend ID. This action is irreversible and cannot be undone. USAGE: scw lb frontend delete [arg=value ...] 
ARGS: - frontend-id Frontend ID to delete + frontend-id ID of the frontend to delete [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-frontend-get-usage.golden b/cmd/scw/testdata/test-all-usage-lb-frontend-get-usage.golden index 8ef041303e..ee8e2e1d85 100644 --- a/cmd/scw/testdata/test-all-usage-lb-frontend-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-frontend-get-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Get a frontend. +Get the full details of a given frontend, specified by its frontend ID. The response contains the frontend's full configuration parameters including the backend it is attached to, the port it listens on, and any certificates it has. USAGE: scw lb frontend get [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-lb-frontend-list-usage.golden b/cmd/scw/testdata/test-all-usage-lb-frontend-list-usage.golden index e8b86658f9..1a6b4f5e83 100644 --- a/cmd/scw/testdata/test-all-usage-lb-frontend-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-frontend-list-usage.golden @@ -1,14 +1,14 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -List frontends in a given load balancer. +List all the frontends of a Load Balancer, specified by its Load Balancer ID. By default, results are returned in ascending order by the creation date of each frontend. The response is an array of frontend objects, containing full details of each one including the port they listen on and the backend they are attached to. USAGE: scw lb frontend list [arg=value ...] 
ARGS: - lb-id Load balancer ID - [name] Use this to search by name - [order-by] Response order (created_at_asc | created_at_desc | name_asc | name_desc) + lb-id Load Balancer ID + [name] Name of the frontend to filter for + [order-by] Sort order of frontends in the response (created_at_asc | created_at_desc | name_asc | name_desc) [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2 | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-frontend-update-usage.golden b/cmd/scw/testdata/test-all-usage-lb-frontend-update-usage.golden index cfbdcca6e1..9953a7fcf8 100644 --- a/cmd/scw/testdata/test-all-usage-lb-frontend-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-frontend-update-usage.golden @@ -1,22 +1,22 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Update a frontend. +Update a given frontend, specified by its frontend ID. You can update configuration parameters including its name and the port it listens on. Note that the request type is PUT and not PATCH. You must set all parameters. USAGE: scw lb frontend update [arg=value ...] ARGS: frontend-id Frontend ID - name Resource name - inbound-port TCP port to listen on the front side - backend-id Backend ID - [timeout-client] Client session maximum inactivity time - [certificate-ids.{index}] List of certificate IDs to bind on the frontend - [enable-http3] Activate HTTP 3 protocol (beta) + name Frontend name + inbound-port Port the frontend should listen on + backend-id Backend ID (ID of the backend the frontend should pass traffic to) + [timeout-client] Maximum allowed inactivity time on the client side + [certificate-ids.{index}] List of SSL/TLS certificate IDs to bind to the frontend + [enable-http3] Defines whether to enable HTTP/3 protocol on the frontend [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) DEPRECATED ARGS: - [certificate-id] Certificate ID, deprecated in favor of `certificate_ids` array! + [certificate-id] Certificate ID, deprecated in favor of certificate_ids array FLAGS: -h, --help help for update diff --git a/cmd/scw/testdata/test-all-usage-lb-frontend-usage.golden b/cmd/scw/testdata/test-all-usage-lb-frontend-usage.golden index c1add1f2f3..6625a8c9c2 100644 --- a/cmd/scw/testdata/test-all-usage-lb-frontend-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-frontend-usage.golden @@ -6,10 +6,10 @@ USAGE: scw lb frontend AVAILABLE COMMANDS: - create Create a frontend in a given load balancer + create Create a frontend in a given Load Balancer delete Delete a frontend get Get a frontend - list List frontends in a given load balancer + list List frontends of a given Load Balancer update Update a frontend FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-private-network-attach-usage.golden b/cmd/scw/testdata/test-all-usage-lb-private-network-attach-usage.golden index c36c8fa8ea..5653132794 100644 --- a/cmd/scw/testdata/test-all-usage-lb-private-network-attach-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-private-network-attach-usage.golden @@ -1,13 +1,13 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Add load balancer on instance private network. +Attach a specified Load Balancer to a specified Private Network, defining a static or DHCP configuration for the Load Balancer on the network. USAGE: scw lb private-network attach [arg=value ...] ARGS: - lb-id Load balancer ID - private-network-id Set your instance private network id + lb-id Load Balancer ID + private-network-id Private Network ID [static-config.ip-address.{index}] [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) diff --git a/cmd/scw/testdata/test-all-usage-lb-private-network-detach-usage.golden b/cmd/scw/testdata/test-all-usage-lb-private-network-detach-usage.golden index fdf36de2df..d7522cd76a 100644 --- a/cmd/scw/testdata/test-all-usage-lb-private-network-detach-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-private-network-detach-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Remove load balancer of private network. +Detach a specified Load Balancer from a specified Private Network. USAGE: scw lb private-network detach [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-lb-private-network-list-usage.golden b/cmd/scw/testdata/test-all-usage-lb-private-network-list-usage.golden index 241731657e..bc7f36e845 100644 --- a/cmd/scw/testdata/test-all-usage-lb-private-network-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-private-network-list-usage.golden @@ -1,13 +1,13 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -List attached private network of load balancer. +List the Private Networks attached to a given Load Balancer, specified by its Load Balancer ID. The response is an array of Private Network objects, giving information including the status, configuration, name and creation date of each Private Network. USAGE: scw lb private-network list [arg=value ...] ARGS: - [order-by] Response order (created_at_asc | created_at_desc) - lb-id Load balancer ID + [order-by] Sort order of Private Network objects in the response (created_at_asc | created_at_desc) + lb-id Load Balancer ID [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2 | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-private-network-usage.golden b/cmd/scw/testdata/test-all-usage-lb-private-network-usage.golden index 06a1f9d9bf..f3d65b1629 100644 --- a/cmd/scw/testdata/test-all-usage-lb-private-network-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-private-network-usage.golden @@ -6,9 +6,9 @@ USAGE: scw lb private-network AVAILABLE COMMANDS: - attach Add load balancer on instance private network - detach Remove load balancer of private network - list List attached private network of load balancer + attach Attach a Load Balancer to a Private Network + detach Detach Load Balancer from Private Network + list List Private Networks attached to a Load Balancer FLAGS: -h, --help help for private-network diff --git a/cmd/scw/testdata/test-all-usage-lb-route-create-usage.golden b/cmd/scw/testdata/test-all-usage-lb-route-create-usage.golden index b80297cd7e..5accaa25d5 100644 --- a/cmd/scw/testdata/test-all-usage-lb-route-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-route-create-usage.golden @@ -1,14 +1,14 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Create a backend redirection. +Create a new route on a given frontend. To configure a route, specify the backend to direct to if a certain condition is matched (based on the value of the SNI field or HTTP Host header). USAGE: scw lb route create [arg=value ...] ARGS: - [frontend-id] Origin of redirection - [backend-id] Destination of destination - [match.sni] Server Name Indication TLS extension (SNI) + [frontend-id] ID of the source frontend to create the route on + [backend-id] ID of the target backend for the route + [match.sni] Server Name Indication (SNI) value to match [match.host-header] HTTP host header to match [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) diff --git a/cmd/scw/testdata/test-all-usage-lb-route-delete-usage.golden b/cmd/scw/testdata/test-all-usage-lb-route-delete-usage.golden index 3d77607325..d49f47c6ec 100644 --- a/cmd/scw/testdata/test-all-usage-lb-route-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-route-delete-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Delete a backend redirection. +Delete an existing route, specified by its route ID. Deleting a route is permanent, and cannot be undone. USAGE: scw lb route delete [arg=value ...] ARGS: - route-id Route id to delete + route-id Route ID [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-route-get-usage.golden b/cmd/scw/testdata/test-all-usage-lb-route-get-usage.golden index c684ec32c3..20f21b98af 100644 --- a/cmd/scw/testdata/test-all-usage-lb-route-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-route-get-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Get single backend redirection. +Retrieve information about an existing route, specified by its route ID. Its full details, origin frontend, target backend and match condition, are returned in the response object. USAGE: scw lb route get [arg=value ...] ARGS: - route-id Id of route to get + route-id Route ID [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lb-route-list-usage.golden b/cmd/scw/testdata/test-all-usage-lb-route-list-usage.golden index 8fad404708..e3fb1931bf 100644 --- a/cmd/scw/testdata/test-all-usage-lb-route-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-route-list-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -List all backend redirections. +List all routes for a given frontend. The response is an array of routes, each one with a specified backend to direct to if a certain condition is matched (based on the value of the SNI field or HTTP Host header). USAGE: scw lb route list [arg=value ...] ARGS: - [order-by] Response order (created_at_asc | created_at_desc) + [order-by] Sort order of routes in the response (created_at_asc | created_at_desc) [frontend-id] [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2 | all) diff --git a/cmd/scw/testdata/test-all-usage-lb-route-update-usage.golden b/cmd/scw/testdata/test-all-usage-lb-route-update-usage.golden index 28da29ecae..a2acf46b06 100644 --- a/cmd/scw/testdata/test-all-usage-lb-route-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-route-update-usage.golden @@ -1,14 +1,14 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Edit a backend redirection. +Update the configuration of an existing route, specified by its route ID. USAGE: scw lb route update [arg=value ...] ARGS: - route-id Route id to update - [backend-id] Backend id of redirection - [match.sni] Server Name Indication TLS extension (SNI) + route-id Route ID + [backend-id] ID of the target backend for the route + [match.sni] Server Name Indication (SNI) value to match [match.host-header] HTTP host header to match [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) diff --git a/cmd/scw/testdata/test-all-usage-lb-route-usage.golden b/cmd/scw/testdata/test-all-usage-lb-route-usage.golden index 63dc19cb59..e7c4d21701 100644 --- a/cmd/scw/testdata/test-all-usage-lb-route-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-route-usage.golden @@ -6,11 +6,11 @@ USAGE: scw lb route AVAILABLE COMMANDS: - create Create a backend redirection - delete Delete a backend redirection - get Get single backend redirection - list List all backend redirections - update Edit a backend redirection + create Create a route + delete Delete a route + get Get a route + list List all routes + update Update a route FLAGS: -h, --help help for route diff --git a/cmd/scw/testdata/test-all-usage-lb-usage.golden b/cmd/scw/testdata/test-all-usage-lb-usage.golden index 5e2dcc17ab..470399c0c0 100644 --- a/cmd/scw/testdata/test-all-usage-lb-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lb-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -This API allows you to manage your load balancer service. +This API allows you to manage your Scaleway Load Balancer services. USAGE: scw lb diff --git a/cmd/scw/testdata/test-all-usage-lbacl-create-usage.golden b/cmd/scw/testdata/test-all-usage-lbacl-create-usage.golden index 8bc79d2460..a1f99245a7 100644 --- a/cmd/scw/testdata/test-all-usage-lbacl-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbacl-create-usage.golden @@ -1,24 +1,24 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Create an ACL for a given frontend. +Create a new ACL for a given frontend. Each ACL must have a name, an action to perform (allow or deny), and a match rule (the action is carried out when the incoming traffic matches the rule). USAGE: scw lb acl create [arg=value ...] 
ARGS: - frontend-id ID of your frontend - name= Name of your ACL ressource - [action.type] The action type (allow | deny | redirect) + frontend-id Frontend ID to attach the ACL to + name= ACL name + [action.type] Action to take when incoming traffic matches an ACL filter (allow | deny | redirect) [action.redirect.type] Redirect type (location | scheme) - [action.redirect.target] Redirect target (target URL for `location`, or target `scheme`) + [action.redirect.target] Redirect target. For a location redirect, you can use a URL e.g. `https://scaleway.com`. Using a scheme name (e.g. `https`, `http`, `ftp`, `git`) will replace the request's original scheme. This can be useful to implement HTTP to HTTPS redirects. Valid placeholders that can be used in a `location` redirect to preserve parts of the original request in the redirection URL are {{ host }}, {{ query }}, {{ path }} and {{ scheme }} [action.redirect.code] HTTP redirect code to use. Valid values are 301, 302, 303, 307 and 308. Default value is 302 - [match.ip-subnet.{index}] A list of IPs or CIDR v4/v6 addresses of the client of the session to match - [match.http-filter] The HTTP filter to match (acl_http_filter_none | path_begin | path_end | regex | http_header_match) - [match.http-filter-value.{index}] A list of possible values to match for the given HTTP filter - [match.http-filter-option] A exra parameter. You can use this field with http_header_match acl type to set the header name to filter - [match.invert] If set to `true`, the ACL matching condition will be of type "UNLESS" - index Order between your Acls (ascending order, 0 is first acl executed) - [description] Description of your ACL ressource + [match.ip-subnet.{index}] List of IPs or CIDR v4/v6 addresses to filter for from the client side + [match.http-filter] Type of HTTP filter to match. Extracts the request's URL path, which starts at the first slash and ends before the question mark (without the host part). 
Defines where to filter for the http_filter_value. Only supported for HTTP backends (acl_http_filter_none | path_begin | path_end | regex | http_header_match) + [match.http-filter-value.{index}] List of values to filter for + [match.http-filter-option] Name of the HTTP header to filter on if `http_header_match` was selected in `http_filter` + [match.invert] Defines whether to invert the match condition. If set to `true`, the ACL carries out its action when the condition DOES NOT match + index Priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed) + [description] ACL description [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lbacl-delete-usage.golden b/cmd/scw/testdata/test-all-usage-lbacl-delete-usage.golden index 9bc33a6478..b44a474670 100644 --- a/cmd/scw/testdata/test-all-usage-lbacl-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbacl-delete-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Delete an ACL. +Delete an ACL, specified by its ACL ID. Deleting an ACL is irreversible and cannot be undone. USAGE: scw lb acl delete [arg=value ...] ARGS: - acl-id ID of your ACL ressource + acl-id ACL ID [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lbacl-get-usage.golden b/cmd/scw/testdata/test-all-usage-lbacl-get-usage.golden index 6d3eee69ca..1b5303486e 100644 --- a/cmd/scw/testdata/test-all-usage-lbacl-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbacl-get-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Get an ACL. +Get information for a particular ACL, specified by its ACL ID. 
The response returns full details of the ACL, including its name, action, match rule and frontend. USAGE: scw lb acl get [arg=value ...] ARGS: - acl-id ID of your ACL ressource + acl-id ACL ID [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lbacl-list-usage.golden b/cmd/scw/testdata/test-all-usage-lbacl-list-usage.golden index 2ae491aa17..108ae420a0 100644 --- a/cmd/scw/testdata/test-all-usage-lbacl-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbacl-list-usage.golden @@ -1,14 +1,14 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -List ACL for a given frontend. +List the ACLs for a given frontend, specified by its frontend ID. The response is an array of ACL objects, each one representing an ACL that denies or allows traffic based on certain conditions. USAGE: scw lb acl list [arg=value ...] ARGS: - frontend-id ID of your frontend - [order-by] Response order (created_at_asc | created_at_desc | name_asc | name_desc) - [name] Filter acl per name + frontend-id Frontend ID (ACLs attached to this frontend will be returned in the response) + [order-by] Sort order of ACLs in the response (created_at_asc | created_at_desc | name_asc | name_desc) + [name] ACL name to filter for [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2 | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lbacl-set-usage.golden b/cmd/scw/testdata/test-all-usage-lbacl-set-usage.golden index 2707f079d4..fcd7f01c1a 100644 --- a/cmd/scw/testdata/test-all-usage-lbacl-set-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbacl-set-usage.golden @@ -1,24 +1,24 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Set all ACLs for a given frontend. 
+For a given frontend specified by its frontend ID, define and add the complete set of ACLs for that frontend. Any existing ACLs on this frontend will be removed. USAGE: scw lb acl set [arg=value ...] ARGS: - acls.{index}.name Name of your ACL resource - [acls.{index}.action.type] The action type (allow | deny | redirect) + acls.{index}.name ACL name + [acls.{index}.action.type] Action to take when incoming traffic matches an ACL filter (allow | deny | redirect) [acls.{index}.action.redirect.type] Redirect type (location | scheme) - [acls.{index}.action.redirect.target] Redirect target (target URL for `location`, or target `scheme`) + [acls.{index}.action.redirect.target] Redirect target. For a location redirect, you can use a URL e.g. `https://scaleway.com`. Using a scheme name (e.g. `https`, `http`, `ftp`, `git`) will replace the request's original scheme. This can be useful to implement HTTP to HTTPS redirects. Valid placeholders that can be used in a `location` redirect to preserve parts of the original request in the redirection URL are {{ host }}, {{ query }}, {{ path }} and {{ scheme }} [acls.{index}.action.redirect.code] HTTP redirect code to use. Valid values are 301, 302, 303, 307 and 308. Default value is 302 - [acls.{index}.match.ip-subnet.{index}] A list of IPs or CIDR v4/v6 addresses of the client of the session to match - [acls.{index}.match.http-filter] The HTTP filter to match (acl_http_filter_none | path_begin | path_end | regex | http_header_match) - [acls.{index}.match.http-filter-value.{index}] A list of possible values to match for the given HTTP filter - [acls.{index}.match.http-filter-option] A exra parameter. 
You can use this field with http_header_match acl type to set the header name to filter - [acls.{index}.match.invert] If set to `true`, the ACL matching condition will be of type "UNLESS" - acls.{index}.index Order between your Acls (ascending order, 0 is first acl executed) - [acls.{index}.description] Description of your ACL ressource - frontend-id The Frontend to change ACL to + [acls.{index}.match.ip-subnet.{index}] List of IPs or CIDR v4/v6 addresses to filter for from the client side + [acls.{index}.match.http-filter] Type of HTTP filter to match. Extracts the request's URL path, which starts at the first slash and ends before the question mark (without the host part). Defines where to filter for the http_filter_value. Only supported for HTTP backends (acl_http_filter_none | path_begin | path_end | regex | http_header_match) + [acls.{index}.match.http-filter-value.{index}] List of values to filter for + [acls.{index}.match.http-filter-option] Name of the HTTP header to filter on if `http_header_match` was selected in `http_filter` + [acls.{index}.match.invert] Defines whether to invert the match condition. If set to `true`, the ACL carries out its action when the condition DOES NOT match + acls.{index}.index Priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed) + [acls.{index}.description] ACL description + frontend-id Frontend ID [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lbacl-update-usage.golden b/cmd/scw/testdata/test-all-usage-lbacl-update-usage.golden index 32202cc7ae..8bb737ff59 100644 --- a/cmd/scw/testdata/test-all-usage-lbacl-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbacl-update-usage.golden @@ -1,24 +1,24 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Update an ACL. +Update a particular ACL, specified by its ACL ID. 
You can update details including its name, action and match rule. USAGE: scw lb acl update [arg=value ...] ARGS: - acl-id ID of your ACL ressource - name Name of your ACL ressource - [action.type] The action type (allow | deny | redirect) + acl-id ACL ID + name ACL name + [action.type] Action to take when incoming traffic matches an ACL filter (allow | deny | redirect) [action.redirect.type] Redirect type (location | scheme) - [action.redirect.target] Redirect target (target URL for `location`, or target `scheme`) + [action.redirect.target] Redirect target. For a location redirect, you can use a URL e.g. `https://scaleway.com`. Using a scheme name (e.g. `https`, `http`, `ftp`, `git`) will replace the request's original scheme. This can be useful to implement HTTP to HTTPS redirects. Valid placeholders that can be used in a `location` redirect to preserve parts of the original request in the redirection URL are {{ host }}, {{ query }}, {{ path }} and {{ scheme }} [action.redirect.code] HTTP redirect code to use. Valid values are 301, 302, 303, 307 and 308. Default value is 302 - [match.ip-subnet.{index}] A list of IPs or CIDR v4/v6 addresses of the client of the session to match - [match.http-filter] The HTTP filter to match (acl_http_filter_none | path_begin | path_end | regex | http_header_match) - [match.http-filter-value.{index}] A list of possible values to match for the given HTTP filter - [match.http-filter-option] A exra parameter. You can use this field with http_header_match acl type to set the header name to filter - [match.invert] If set to `true`, the ACL matching condition will be of type "UNLESS" - index Order between your Acls (ascending order, 0 is first acl executed) - [description] Description of your ACL ressource + [match.ip-subnet.{index}] List of IPs or CIDR v4/v6 addresses to filter for from the client side + [match.http-filter] Type of HTTP filter to match. 
Extracts the request's URL path, which starts at the first slash and ends before the question mark (without the host part). Defines where to filter for the http_filter_value. Only supported for HTTP backends (acl_http_filter_none | path_begin | path_end | regex | http_header_match) + [match.http-filter-value.{index}] List of values to filter for + [match.http-filter-option] Name of the HTTP header to filter on if `http_header_match` was selected in `http_filter` + [match.invert] Defines whether to invert the match condition. If set to `true`, the ACL carries out its action when the condition DOES NOT match + index Priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed) + [description] ACL description [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lbacl-usage.golden b/cmd/scw/testdata/test-all-usage-lbacl-usage.golden index 3555b45847..371cf95387 100644 --- a/cmd/scw/testdata/test-all-usage-lbacl-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbacl-usage.golden @@ -9,8 +9,8 @@ AVAILABLE COMMANDS: create Create an ACL for a given frontend delete Delete an ACL get Get an ACL - list List ACL for a given frontend - set Set all ACLs for a given frontend + list List ACLs for a given frontend + set Define all ACLs for a given frontend update Update an ACL FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lbip-create-usage.golden b/cmd/scw/testdata/test-all-usage-lbip-create-usage.golden index f661955587..d3eacbdb85 100644 --- a/cmd/scw/testdata/test-all-usage-lbip-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbip-create-usage.golden @@ -1,13 +1,13 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Create an IP. +Create a new Load Balancer flexible IP address, in the specified Scaleway Project. 
This can be attached to new Load Balancers created in the future. USAGE: scw lb ip create [arg=value ...] ARGS: [project-id] Project ID to use. If none is passed the default project ID will be used - [reverse] Reverse domain name + [reverse] Reverse DNS (domain name) for the IP address [organization-id] Organization ID to use. If none is passed the default organization ID will be used [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) diff --git a/cmd/scw/testdata/test-all-usage-lbip-delete-usage.golden b/cmd/scw/testdata/test-all-usage-lbip-delete-usage.golden index 2e07e865d2..f456af11b5 100644 --- a/cmd/scw/testdata/test-all-usage-lbip-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbip-delete-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Delete an IP. +Delete a Load Balancer flexible IP address. This action is irreversible, and cannot be undone. USAGE: scw lb ip delete [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-lbip-get-usage.golden b/cmd/scw/testdata/test-all-usage-lbip-get-usage.golden index fcd2a0ca90..414cf9cadb 100644 --- a/cmd/scw/testdata/test-all-usage-lbip-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbip-get-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Get an IP. +Retrieve the full details of a Load Balancer flexible IP address. USAGE: scw lb ip get [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-lbip-list-usage.golden b/cmd/scw/testdata/test-all-usage-lbip-list-usage.golden index e72f65dd58..b42591ee8c 100644 --- a/cmd/scw/testdata/test-all-usage-lbip-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbip-list-usage.golden @@ -1,14 +1,14 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -List IPs. 
+List the Load Balancer flexible IP addresses held in the account (filtered by Organization ID or Project ID). It is also possible to search for a specific IP address. USAGE: scw lb ip list [arg=value ...] ARGS: - [ip-address] Use this to search by IP address - [project-id] Filter IPs by project ID - [organization-id] Filter IPs by organization id + [ip-address] IP address to filter for + [project-id] Project ID to filter for, only Load Balancer IP addresses from this Project will be returned + [organization-id] Organization ID to filter for, only Load Balancer IP addresses from this Organization will be returned [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2 | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lbip-update-usage.golden b/cmd/scw/testdata/test-all-usage-lbip-update-usage.golden index b4baa3ee7a..339185936e 100644 --- a/cmd/scw/testdata/test-all-usage-lbip-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbip-update-usage.golden @@ -1,13 +1,13 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Update an IP. +Update the reverse DNS of a Load Balancer flexible IP address. USAGE: scw lb ip update [arg=value ...] ARGS: ip-id IP address ID - [reverse] Reverse DNS + [reverse] Reverse DNS (domain name) for the IP address [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lbip-usage.golden b/cmd/scw/testdata/test-all-usage-lbip-usage.golden index 043364a169..149dad3053 100644 --- a/cmd/scw/testdata/test-all-usage-lbip-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lbip-usage.golden @@ -6,11 +6,11 @@ USAGE: scw lb ip AVAILABLE COMMANDS: - create Create an IP - delete Delete an IP - get Get an IP - list List IPs - update Update an IP + create Create an IP address + delete Delete an IP address + get Get an IP address + list List IP addresses + update Update an IP address FLAGS: -h, --help help for ip diff --git a/cmd/scw/testdata/test-all-usage-lblb-create-usage.golden b/cmd/scw/testdata/test-all-usage-lblb-create-usage.golden index 7a3a81ed02..b131e250ee 100644 --- a/cmd/scw/testdata/test-all-usage-lblb-create-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lblb-create-usage.golden @@ -1,18 +1,18 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Create a load balancer. +Create a new Load Balancer. Note that the Load Balancer will be created without frontends or backends; these must be created separately via the dedicated endpoints. USAGE: scw lb lb create [arg=value ...] ARGS: [project-id] Project ID to use. 
If none is passed the default project ID will be used - name= Resource names - [description] Resource description - [ip-id] Just like for compute instances, when you destroy a load balancer, you can keep its highly available IP address and reuse it for another load balancer later - [tags.{index}] List of keyword - [type=LB-S] Load balancer offer type (LB-S | LB-GP-M | LB-GP-L) - [ssl-compatibility-level] (ssl_compatibility_level_unknown | ssl_compatibility_level_intermediate | ssl_compatibility_level_modern | ssl_compatibility_level_old) + name= Name for the Load Balancer + [description] Description for the Load Balancer + [ip-id] ID of an existing flexible IP address to attach to the Load Balancer + [tags.{index}] List of tags for the Load Balancer + [type=LB-S] Load Balancer commercial offer type. Use the Load Balancer types endpoint to retrieve a list of available offer types (LB-S | LB-GP-M | LB-GP-L) + [ssl-compatibility-level] Determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and do not need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort (ssl_compatibility_level_unknown | ssl_compatibility_level_intermediate | ssl_compatibility_level_modern | ssl_compatibility_level_old) [organization-id] Organization ID to use. If none is passed the default organization ID will be used [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) diff --git a/cmd/scw/testdata/test-all-usage-lblb-delete-usage.golden b/cmd/scw/testdata/test-all-usage-lblb-delete-usage.golden index 6971bdd8bd..0ba2cb1a30 100644 --- a/cmd/scw/testdata/test-all-usage-lblb-delete-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lblb-delete-usage.golden @@ -1,13 +1,13 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Delete a load balancer. +Delete an existing Load Balancer, specified by its Load Balancer ID. Deleting a Load Balancer is permanent, and cannot be undone. The Load Balancer's flexible IP address can either be deleted with the Load Balancer, or kept in your account for future use. USAGE: scw lb lb delete [arg=value ...] ARGS: - lb-id Load balancer ID - [release-ip] Set true if you don't want to keep this IP address + lb-id ID of the Load Balancer to delete + [release-ip] Defines whether the Load Balancer's flexible IP should be deleted. Set to true to release the flexible IP, or false to keep it available in your account for future Load Balancers [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lblb-get-stats-usage.golden b/cmd/scw/testdata/test-all-usage-lblb-get-stats-usage.golden index 1c7fadd78f..068e3632cf 100644 --- a/cmd/scw/testdata/test-all-usage-lblb-get-stats-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lblb-get-stats-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Get usage statistics of a given load balancer. +Get usage statistics of a given Load Balancer. USAGE: scw lb lb get-stats [arg=value ...] ARGS: - lb-id Load balancer ID + lb-id Load Balancer ID [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lblb-get-usage.golden b/cmd/scw/testdata/test-all-usage-lblb-get-usage.golden index 05f96797d3..15ec14d8e4 100644 --- a/cmd/scw/testdata/test-all-usage-lblb-get-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lblb-get-usage.golden @@ -1,12 +1,12 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Get a load balancer. +Retrieve information about an existing Load Balancer, specified by its Load Balancer ID. Its full details, including name, status and IP address, are returned in the response object. USAGE: scw lb lb get [arg=value ...] ARGS: - lb-id Load balancer ID + lb-id Load Balancer ID [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lblb-list-usage.golden b/cmd/scw/testdata/test-all-usage-lblb-list-usage.golden index 5eb02bf9c7..dbee5420e1 100644 --- a/cmd/scw/testdata/test-all-usage-lblb-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lblb-list-usage.golden @@ -1,15 +1,15 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -List load balancers. +List all Load Balancers in the specified zone, for a Scaleway Organization or Scaleway Project. By default, the Load Balancers returned in the list are ordered by creation date in ascending order, though this can be modified via the `order_by` field. USAGE: scw lb lb list [arg=value ...] 
ARGS: - [name] Use this to search by name - [order-by] Response order (created_at_asc | created_at_desc | name_asc | name_desc) - [project-id] Filter LBs by project ID - [organization-id] Filter LBs by organization ID + [name] Load Balancer name to filter for + [order-by] Sort order of Load Balancers in the response (created_at_asc | created_at_desc | name_asc | name_desc) + [project-id] Project ID to filter for, only Load Balancers from this Project will be returned + [organization-id] Organization ID to filter for, only Load Balancers from this Organization will be returned [zone=fr-par-1] Zone to target. If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2 | all) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lblb-migrate-usage.golden b/cmd/scw/testdata/test-all-usage-lblb-migrate-usage.golden index 58bdcf40ae..0b010c4da4 100644 --- a/cmd/scw/testdata/test-all-usage-lblb-migrate-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lblb-migrate-usage.golden @@ -1,13 +1,13 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Migrate a load balancer. +Migrate an existing Load Balancer from one commercial type to another. Allows you to scale your Load Balancer up or down in terms of bandwidth or multi-cloud provision. USAGE: scw lb lb migrate [arg=value ...] ARGS: - lb-id Load balancer ID - type Load balancer type (check /lb-types to list all type) (LB-S | LB-GP-M | LB-GP-L) + lb-id Load Balancer ID + type Load Balancer type to migrate to (use the List all Load Balancer offer types endpoint to get a list of available offer types) (LB-S | LB-GP-M | LB-GP-L) [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lblb-types-list-usage.golden b/cmd/scw/testdata/test-all-usage-lblb-types-list-usage.golden index a04026c4b5..2e91c876e2 100644 --- a/cmd/scw/testdata/test-all-usage-lblb-types-list-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lblb-types-list-usage.golden @@ -1,6 +1,6 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -List all load balancer offer type. +List all the different commercial Load Balancer types. The response includes an array of offer types, each with a name, description, and information about its stock availability. USAGE: scw lb lb-types list [arg=value ...] diff --git a/cmd/scw/testdata/test-all-usage-lblb-types-usage.golden b/cmd/scw/testdata/test-all-usage-lblb-types-usage.golden index cefb2e9d87..53fd40bb8e 100644 --- a/cmd/scw/testdata/test-all-usage-lblb-types-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lblb-types-usage.golden @@ -6,7 +6,7 @@ USAGE: scw lb lb-types AVAILABLE COMMANDS: - list List all load balancer offer type + list List all Load Balancer offer types FLAGS: -h, --help help for lb-types diff --git a/cmd/scw/testdata/test-all-usage-lblb-update-usage.golden b/cmd/scw/testdata/test-all-usage-lblb-update-usage.golden index a6d32bb7cf..5db3fd7ab9 100644 --- a/cmd/scw/testdata/test-all-usage-lblb-update-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lblb-update-usage.golden @@ -1,16 +1,16 @@ 🎲🎲🎲 EXIT CODE: 0 🎲🎲🎲 πŸŸ₯πŸŸ₯πŸŸ₯ STDERR️️ πŸŸ₯πŸŸ₯πŸŸ₯️ -Update a load balancer. +Update the parameters of an existing Load Balancer, specified by its Load Balancer ID. Note that the request type is PUT and not PATCH. You must set all parameters. USAGE: scw lb lb update [arg=value ...] 
ARGS: - lb-id Load balancer ID - name Resource name - description Resource description - [tags.{index}] List of keywords - [ssl-compatibility-level] (ssl_compatibility_level_unknown | ssl_compatibility_level_intermediate | ssl_compatibility_level_modern | ssl_compatibility_level_old) + lb-id Load Balancer ID + name Load Balancer name + description Load Balancer description + [tags.{index}] List of tags for the Load Balancer + [ssl-compatibility-level] Determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and don't need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort (ssl_compatibility_level_unknown | ssl_compatibility_level_intermediate | ssl_compatibility_level_modern | ssl_compatibility_level_old) [zone=fr-par-1] Zone to target. 
If none is passed will use default zone from the config (fr-par-1 | fr-par-2 | nl-ams-1 | nl-ams-2 | pl-waw-1 | pl-waw-2) FLAGS: diff --git a/cmd/scw/testdata/test-all-usage-lblb-usage.golden b/cmd/scw/testdata/test-all-usage-lblb-usage.golden index 376f25343c..c01267632b 100644 --- a/cmd/scw/testdata/test-all-usage-lblb-usage.golden +++ b/cmd/scw/testdata/test-all-usage-lblb-usage.golden @@ -6,13 +6,13 @@ USAGE: scw lb lb AVAILABLE COMMANDS: - create Create a load balancer - delete Delete a load balancer - get Get a load balancer - get-stats Get usage statistics of a given load balancer - list List load balancers - migrate Migrate a load balancer - update Update a load balancer + create Create a Load Balancer + delete Delete a Load Balancer + get Get a Load Balancer + get-stats Get usage statistics of a given Load Balancer + list List Load Balancers + migrate Migrate a Load Balancer + update Update a Load Balancer wait Wait for a load balancer to reach a stable state FLAGS: diff --git a/cmd/scw/testdata/test-main-usage-usage.golden b/cmd/scw/testdata/test-main-usage-usage.golden index a08a37f783..d102dbbba7 100644 --- a/cmd/scw/testdata/test-main-usage-usage.golden +++ b/cmd/scw/testdata/test-main-usage-usage.golden @@ -23,7 +23,7 @@ AVAILABLE COMMANDS: instance Instance API iot This API allows you to manage IoT hubs and devices k8s Kapsule API - lb This API allows you to manage your load balancer service + lb This API allows you to manage your Scaleway Load Balancer services marketplace Marketplace API mnq This API allows you to manage Messaging or Queueing brokers object Object-storage utils diff --git a/docs/commands/k8s.md b/docs/commands/k8s.md index 711ddb79b4..85d0527f1e 100644 --- a/docs/commands/k8s.md +++ b/docs/commands/k8s.md @@ -5,8 +5,8 @@ Kapsule API. 
- [Kapsule cluster management commands](#kapsule-cluster-management-commands) - [Create a new cluster](#create-a-new-cluster) - [Delete a cluster](#delete-a-cluster) - - [Get a cluster](#get-a-cluster) - - [List all the clusters](#list-all-the-clusters) + - [Get specific cluster information](#get-specific-cluster-information) + - [List all clusters](#list-all-clusters) - [List available versions for a cluster](#list-available-versions-for-a-cluster) - [Reset the admin token of a cluster](#reset-the-admin-token-of-a-cluster) - [Update a cluster](#update-a-cluster) @@ -46,7 +46,7 @@ It is composed of different pools, each pool containing the same kind of nodes. ### Create a new cluster -This method allows to create a new Kubernetes cluster on an account. +Creates a new Kubernetes cluster on a Scaleway account. **Usage:** @@ -60,30 +60,30 @@ scw k8s cluster create [arg=value ...] | Name | | Description | |------|---|-------------| | project-id | | Project ID to use. If none is passed the default project ID will be used | -| type | | The type of the cluster | -| name | Required
Default: `` | The name of the cluster | -| description | | The description of the cluster | -| tags.{index} | | The tags associated with the cluster | -| version | Required
Default: `latest` | The Kubernetes version of the cluster | -| cni | Required
Default: `cilium`
One of: `unknown_cni`, `cilium`, `calico`, `weave`, `flannel`, `kilo` | The Container Network Interface (CNI) plugin that will run in the cluster | -| ~~enable-dashboard~~ | Deprecated | The enablement of the Kubernetes Dashboard in the cluster | -| ~~ingress~~ | Deprecated
One of: `unknown_ingress`, `none`, `nginx`, `traefik`, `traefik2` | The Ingress Controller that will run in the cluster | -| pools.{index}.name | Required | The name of the pool | -| pools.{index}.node-type | Required | The node type is the type of Scaleway Instance wanted for the pool | -| pools.{index}.placement-group-id | | The placement group ID in which all the nodes of the pool will be created | -| pools.{index}.autoscaling | | The enablement of the autoscaling feature for the pool | -| pools.{index}.size | Required | The size (number of nodes) of the pool | -| pools.{index}.min-size | | The minimum size of the pool | -| pools.{index}.max-size | | The maximum size of the pool | -| pools.{index}.container-runtime | One of: `unknown_runtime`, `docker`, `containerd`, `crio` | The container runtime for the nodes of the pool | -| pools.{index}.autohealing | | The enablement of the autohealing feature for the pool | -| pools.{index}.tags.{index} | | The tags associated with the pool | -| pools.{index}.kubelet-args.{key} | | The Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental | +| type | | Type of the cluster | +| name | Required
Default: `` | Name of the cluster | +| description | | Description of the cluster | +| tags.{index} | | Tags associated with the cluster | +| version | Required
Default: `latest` | Kubernetes version of the cluster | +| cni | Required
Default: `cilium`
One of: `unknown_cni`, `cilium`, `calico`, `weave`, `flannel`, `kilo` | Container Network Interface (CNI) plugin that will run in the cluster | +| ~~enable-dashboard~~ | Deprecated | Defines if the Kubernetes Dashboard is enabled in the cluster | +| ~~ingress~~ | Deprecated
One of: `unknown_ingress`, `none`, `nginx`, `traefik`, `traefik2` | Ingress Controller that will run in the cluster | +| pools.{index}.name | Required | Name of the pool | +| pools.{index}.node-type | Required | Node type is the type of Scaleway Instance wanted for the pool | +| pools.{index}.placement-group-id | | Placement group ID in which all the nodes of the pool will be created | +| pools.{index}.autoscaling | | Defines whether the autoscaling feature is enabled for the pool | +| pools.{index}.size | Required | Size (number of nodes) of the pool | +| pools.{index}.min-size | | Minimum size of the pool | +| pools.{index}.max-size | | Maximum size of the pool | +| pools.{index}.container-runtime | One of: `unknown_runtime`, `docker`, `containerd`, `crio` | Container runtime for the nodes of the pool | +| pools.{index}.autohealing | | Defines whether the autohealing feature is enabled for the pool | +| pools.{index}.tags.{index} | | Tags associated with the pool | +| pools.{index}.kubelet-args.{key} | | Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental | | pools.{index}.upgrade-policy.max-unavailable | | The maximum number of nodes that can be not ready at the same time | | pools.{index}.upgrade-policy.max-surge | | The maximum number of nodes to be created during the upgrade | -| pools.{index}.zone | | The Zone in which the Pool's node will be spawn in | -| pools.{index}.root-volume-type | One of: `default_volume_type`, `l_ssd`, `b_ssd` | The system volume disk type | -| pools.{index}.root-volume-size | | The system volume disk size | +| pools.{index}.zone | | Zone in which the pool's nodes will be spawned | +| pools.{index}.root-volume-type | One of: `default_volume_type`, `l_ssd`, `b_ssd` | System volume disk type | +| pools.{index}.root-volume-size | | System volume disk size | | autoscaler-config.scale-down-disabled | | Disable the cluster autoscaler | | autoscaler-config.scale-down-delay-after-add | | How long after scale up that scale down evaluation resumes | | autoscaler-config.estimator | One of: `unknown_estimator`, `binpacking` | Type of resource estimator to be used in scale up | @@ -95,8 +95,8 @@ scw k8s cluster create [arg=value ...] 
| autoscaler-config.scale-down-utilization-threshold | | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down | | autoscaler-config.max-graceful-termination-sec | | Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node | | auto-upgrade.enable | | Whether or not auto upgrade is enabled for the cluster | -| auto-upgrade.maintenance-window.start-hour | | The start hour of the 2-hour maintenance window | -| auto-upgrade.maintenance-window.day | One of: `any`, `monday`, `tuesday`, `wednesday`, `thursday`, `friday`, `saturday`, `sunday` | The day of the week for the maintenance window | +| auto-upgrade.maintenance-window.start-hour | | Start time of the two-hour maintenance window | +| auto-upgrade.maintenance-window.day | One of: `any`, `monday`, `tuesday`, `wednesday`, `thursday`, `friday`, `saturday`, `sunday` | Day of the week for the maintenance window | | feature-gates.{index} | | List of feature gates to enable | | admission-plugins.{index} | | List of admission plugins to enable | | open-id-connect-config.issuer-url | | URL of the provider which allows the API server to discover public signing keys | @@ -105,7 +105,7 @@ scw k8s cluster create [arg=value ...] | open-id-connect-config.username-prefix | | Prefix prepended to username | | open-id-connect-config.groups-claim.{index} | | JWT claim to use as the user's group | | open-id-connect-config.groups-prefix | | Prefix prepended to group claims | -| open-id-connect-config.required-claim.{index} | | Multiple key=value pairs that describes a required claim in the ID Token | +| open-id-connect-config.required-claim.{index} | | Multiple key=value pairs that describe a required claim in the ID token | | apiserver-cert-sans.{index} | | Additional Subject Alternative Names for the Kubernetes API server certificate | | organization-id | | Organization ID to use. 
If none is passed the default organization ID will be used | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -129,7 +129,7 @@ scw k8s cluster create name=bar version=1.24.7 tags.0=tag1 tags.1=tag2 cni=calic ### Delete a cluster -This method allows to delete a specific cluster and all its associated pools and nodes. Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster. +Deletes a specific cluster and all its associated pools and nodes. Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster. **Usage:** @@ -142,7 +142,7 @@ scw k8s cluster delete [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster to delete | +| cluster-id | Required | ID of the cluster to delete | | with-additional-resources | | Set true if you want to delete all volumes (including retain volume type) and loadbalancers whose name start with cluster ID | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -150,7 +150,7 @@ scw k8s cluster delete [arg=value ...] **Examples:** -Delete a given cluster +Delete a cluster ``` scw k8s cluster delete 11111111-1111-1111-111111111111 ``` @@ -158,9 +158,9 @@ scw k8s cluster delete 11111111-1111-1111-111111111111 -### Get a cluster +### Get specific cluster information -This method allows to get details about a specific Kubernetes cluster. +Get details about a specific Kubernetes cluster. **Usage:** @@ -180,7 +180,7 @@ scw k8s cluster get [arg=value ...] **Examples:** -Get a given cluster +Get a cluster information ``` scw k8s cluster get 11111111-1111-1111-111111111111 ``` @@ -188,9 +188,9 @@ scw k8s cluster get 11111111-1111-1111-111111111111 -### List all the clusters +### List all clusters -This method allows to list all the existing Kubernetes clusters in an account. +List all the existing Kubernetes clusters in a specific Region. **Usage:** @@ -203,19 +203,19 @@ scw k8s cluster list [arg=value ...] 
| Name | | Description | |------|---|-------------| -| project-id | | The project ID on which to filter the returned clusters | -| order-by | One of: `created_at_asc`, `created_at_desc`, `updated_at_asc`, `updated_at_desc`, `name_asc`, `name_desc`, `status_asc`, `status_desc`, `version_asc`, `version_desc` | The sort order of the returned clusters | -| name | | The name on which to filter the returned clusters | -| status | One of: `unknown`, `creating`, `ready`, `deleting`, `deleted`, `updating`, `locked`, `pool_required` | The status on which to filter the returned clusters | -| type | | The type on which to filter the returned clusters | -| organization-id | | The organization ID on which to filter the returned clusters | +| project-id | | Project ID on which to filter the returned clusters | +| order-by | One of: `created_at_asc`, `created_at_desc`, `updated_at_asc`, `updated_at_desc`, `name_asc`, `name_desc`, `status_asc`, `status_desc`, `version_asc`, `version_desc` | Sort order of the returned clusters | +| name | | Name on which to filter the returned clusters | +| status | One of: `unknown`, `creating`, `ready`, `deleting`, `deleted`, `updating`, `locked`, `pool_required` | Status on which to filter the returned clusters | +| type | | Type on which to filter the returned clusters | +| organization-id | | Organization ID on which to filter the returned clusters | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw`, `all` | Region to target. If none is passed will use default region from the config | **Examples:** -List all the clusters on your default region +List all clusters on your default region ``` scw k8s cluster list ``` @@ -235,7 +235,7 @@ scw k8s cluster list region=fr-par name=cluster1 ### List available versions for a cluster -This method allows to list the versions that a specific Kubernetes cluster is allowed to upgrade to. Note that it will be every patch version greater than the actual one as well a one minor version ahead of the actual one. Upgrades skipping a minor version will not work. +List the versions that a specific Kubernetes cluster is allowed to upgrade to. Results will comprise every patch version greater than the current patch, as well as one minor version ahead of the current version. Any upgrade skipping a minor version will not work. **Usage:** scw k8s cluster list-available-versions [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster which the available Kuberentes versions will be listed from | +| cluster-id | Required | ID of the cluster which the available Kubernetes versions will be listed from | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -List all available versions for a given cluster to upgrade to +List all available versions for a cluster to upgrade to ``` scw k8s cluster list-available-versions 11111111-1111-1111-111111111111 ``` @@ -265,7 +265,7 @@ scw k8s cluster list-available-versions 11111111-1111-1111-111111111111 ### Reset the admin token of a cluster -This method allows to reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable after) and create a new one. Note that the redownload of the kubeconfig will be necessary to keep interacting with the cluster (if the old admin token was used). +Reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable afterwards) and create a new one. Note that you will need to redownload kubeconfig in order to keep interacting with the cluster. **Usage:** @@ -278,14 +278,14 @@ scw k8s cluster reset-admin-token [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster of which the admin token will be renewed | +| cluster-id | Required | ID of the cluster on which the admin token will be renewed | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Reset the admin token for a given cluster +Reset the admin token for a cluster ``` scw k8s cluster reset-admin-token 11111111-1111-1111-111111111111 ``` @@ -295,7 +295,7 @@ scw k8s cluster reset-admin-token 11111111-1111-1111-111111111111 ### Update a cluster -This method allows to update a specific Kubernetes cluster. Note that this method is not made to upgrade a Kubernetes cluster. +Update a specific Kubernetes cluster. Note that this method is designed to update details such as name, description, tags and configuration. However, you cannot upgrade a cluster with this method. To do so, use the dedicated endpoint. **Usage:** @@ -308,10 +308,10 @@ scw k8s cluster update [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster to update | -| name | | The new name of the cluster | -| description | | The new description of the cluster | -| tags.{index} | | The new tags associated with the cluster | +| cluster-id | Required | ID of the cluster to update | +| name | | New external name of the cluster | +| description | | New description of the cluster | +| tags.{index} | | New tags associated with the cluster | | autoscaler-config.scale-down-disabled | | Disable the cluster autoscaler | | autoscaler-config.scale-down-delay-after-add | | How long after scale up that scale down evaluation resumes | | autoscaler-config.estimator | One of: `unknown_estimator`, `binpacking` | Type of resource estimator to be used in scale up | @@ -322,11 +322,11 @@ scw k8s cluster update [arg=value ...] 
| autoscaler-config.scale-down-unneeded-time | | How long a node should be unneeded before it is eligible for scale down | | autoscaler-config.scale-down-utilization-threshold | | Node utilization level, defined as sum of requested resources divided by capacity, below which a node can be considered for scale down | | autoscaler-config.max-graceful-termination-sec | | Maximum number of seconds the cluster autoscaler waits for pod termination when trying to scale down a node | -| ~~enable-dashboard~~ | Deprecated | The new value of the Kubernetes Dashboard enablement | -| ~~ingress~~ | Deprecated
One of: `unknown_ingress`, `none`, `nginx`, `traefik`, `traefik2` | The new Ingress Controller for the cluster | +| ~~enable-dashboard~~ | Deprecated | New value of the Kubernetes Dashboard enablement | +| ~~ingress~~ | Deprecated
One of: `unknown_ingress`, `none`, `nginx`, `traefik`, `traefik2` | New Ingress Controller for the cluster | | auto-upgrade.enable | | Whether or not auto upgrade is enabled for the cluster | -| auto-upgrade.maintenance-window.start-hour | | The start hour of the 2-hour maintenance window | -| auto-upgrade.maintenance-window.day | One of: `any`, `monday`, `tuesday`, `wednesday`, `thursday`, `friday`, `saturday`, `sunday` | The day of the week for the maintenance window | +| auto-upgrade.maintenance-window.start-hour | | Start time of the two-hour maintenance window | +| auto-upgrade.maintenance-window.day | One of: `any`, `monday`, `tuesday`, `wednesday`, `thursday`, `friday`, `saturday`, `sunday` | Day of the week for the maintenance window | | feature-gates.{index} | | List of feature gates to enable | | admission-plugins.{index} | | List of admission plugins to enable | | open-id-connect-config.issuer-url | | URL of the provider which allows the API server to discover public signing keys | @@ -335,7 +335,7 @@ scw k8s cluster update [arg=value ...] | open-id-connect-config.username-prefix | | Prefix prepended to username | | open-id-connect-config.groups-claim.{index} | | JWT claim to use as the user's group | | open-id-connect-config.groups-prefix | | Prefix prepended to group claims | -| open-id-connect-config.required-claim.{index} | | Multiple key=value pairs that describes a required claim in the ID Token | +| open-id-connect-config.required-claim.{index} | | Multiple key=value pairs that describes a required claim in the ID token | | apiserver-cert-sans.{index} | | Additional Subject Alternative Names for the Kubernetes API server certificate | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -343,12 +343,12 @@ scw k8s cluster update [arg=value ...] **Examples:** -Enable dashboard on a given cluster +Enable dashboard on a cluster ``` scw k8s cluster update 11111111-1111-1111-111111111111 enable-dashboard=true ``` -Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a given cluster +Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a cluster ``` scw k8s cluster update 11111111-1111-1111-111111111111 feature-gates.0=TTLAfterFinished feature-gates.1=ServiceNodeExclusion ``` @@ -358,7 +358,7 @@ scw k8s cluster update 11111111-1111-1111-111111111111 feature-gates.0=TTLAfterF ### Upgrade a cluster -This method allows to upgrade a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version. +Upgrades a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version. **Usage:** @@ -371,21 +371,21 @@ scw k8s cluster upgrade [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster to upgrade | -| version | Required | The new Kubernetes version of the cluster | -| upgrade-pools | | The enablement of the pools upgrade | +| cluster-id | Required | ID of the cluster to upgrade | +| version | Required | New Kubernetes version of the cluster | +| upgrade-pools | | Enablement of the pools upgrade | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Upgrade a given cluster to Kubernetes version 1.24.7 (without upgrading the pools) +Upgrade a cluster to Kubernetes version 1.24.7 (without upgrading the pools) ``` scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7 ``` -Upgrade a given cluster to Kubernetes version 1.24.7 (and upgrade the pools) +Upgrade a cluster to Kubernetes version 1.24.7 (and upgrade the pools) ``` scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7 upgrade-pools=true ``` @@ -523,15 +523,13 @@ scw k8s kubeconfig uninstall 11111111-1111-1111-1111-111111111111 ## Kapsule node management commands -A node (short for worker node) is an abstraction for a Scaleway Instance. -It is part of a pool and is instantiated by Scaleway, making Kubernetes software installed and configured automatically on it. -Please note that Kubernetes nodes cannot be accessed with ssh. +A node (short for worker node) is an abstraction for a Scaleway Instance. A node is always part of a pool. Each of them will have Kubernetes software automatically installed and configured by Scaleway. Please note that Kubernetes nodes cannot be accessed with SSH. ### Delete a node in a cluster -This method allows to delete a specific node. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Delete a specific node. Note that when there is not enough space to reschedule all the pods (in a one-node cluster for instance), you may experience some disruption of your applications. **Usage:** @@ -544,7 +542,7 @@ scw k8s node delete [arg=value ...] 
| Name | | Description | |------|---|-------------| -| node-id | Required | The ID of the node to replace | +| node-id | Required | ID of the node to replace | | skip-drain | | Skip draining node from its workload | | replace | | Add a new node after the deletion of this node | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -553,17 +551,17 @@ scw k8s node delete [arg=value ...] **Examples:** -Delete a given node +Delete a node ``` scw k8s node delete 11111111-1111-1111-111111111111 ``` -Delete a given node without evicting workloads +Delete a node without evicting workloads ``` scw k8s node delete 11111111-1111-1111-111111111111 skip-drain=true ``` -Replace a given node by a new one +Replace a node by a new one ``` scw k8s node delete 11111111-1111-1111-111111111111 replace=true ``` @@ -573,7 +571,7 @@ scw k8s node delete 11111111-1111-1111-111111111111 replace=true ### Get a node in a cluster -This method allows to get details about a specific Kubernetes node. +Get details about a specific Kubernetes node. **Usage:** @@ -586,14 +584,14 @@ scw k8s node get [arg=value ...] | Name | | Description | |------|---|-------------| -| node-id | Required | The ID of the requested node | +| node-id | Required | ID of the requested node | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Get a given node +Get a node ``` scw k8s node get 11111111-1111-1111-111111111111 ``` @@ -603,7 +601,7 @@ scw k8s node get 11111111-1111-1111-111111111111 ### List all the nodes in a cluster -This method allows to list all the existing nodes for a specific Kubernetes cluster. +List all the existing nodes for a specific Kubernetes cluster. **Usage:** @@ -616,28 +614,28 @@ scw k8s node list [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The cluster ID from which the nodes will be listed from | -| pool-id | | The pool ID on which to filter the returned nodes | -| order-by | One of: `created_at_asc`, `created_at_desc` | The sort order of the returned nodes | -| name | | The name on which to filter the returned nodes | -| status | One of: `unknown`, `creating`, `not_ready`, `ready`, `deleting`, `deleted`, `locked`, `rebooting`, `creation_error`, `upgrading`, `starting`, `registering` | The status on which to filter the returned nodes | +| cluster-id | Required | Cluster ID from which the nodes will be listed from | +| pool-id | | Pool ID on which to filter the returned nodes | +| order-by | One of: `created_at_asc`, `created_at_desc` | Sort order of the returned nodes | +| name | | Name on which to filter the returned nodes | +| status | One of: `unknown`, `creating`, `not_ready`, `ready`, `deleting`, `deleted`, `locked`, `rebooting`, `creation_error`, `upgrading`, `starting`, `registering` | Status on which to filter the returned nodes | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw`, `all` | Region to target. If none is passed will use default region from the config | **Examples:** -List all the nodes in the given cluster +List all the nodes in the cluster ``` scw k8s node list cluster-id=11111111-1111-1111-111111111111 ``` -List all the nodes in the pool 2222222222222-2222-222222222222 in the given cluster +List all the nodes in the pool 2222222222222-2222-222222222222 in the cluster ``` scw k8s node list cluster-id=11111111-1111-1111-111111111111 pool-id=2222222222222-2222-222222222222 ``` -List all ready nodes in the given cluster +List all ready nodes in the cluster ``` scw k8s node list cluster-id=11111111-1111-1111-111111111111 status=ready ``` @@ -647,7 +645,7 @@ scw k8s node list cluster-id=11111111-1111-1111-111111111111 status=ready ### Reboot a node in a cluster -This method allows to reboot a specific node. This node will frist be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Reboot a specific node. This node will first be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. Note that when there is not enough space to reschedule all the pods (in a one-node cluster, for instance), you may experience some disruption of your applications. **Usage:** @@ -660,14 +658,14 @@ scw k8s node reboot [arg=value ...] | Name | | Description | |------|---|-------------| -| node-id | Required | The ID of the node to reboot | +| node-id | Required | ID of the node to reboot | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Reboot a given node +Reboot a node ``` scw k8s node reboot 11111111-1111-1111-111111111111 ``` @@ -677,7 +675,7 @@ scw k8s node reboot 11111111-1111-1111-111111111111 ### Replace a node in a cluster -This method allows to replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. +Replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications. **Usage:** @@ -690,14 +688,14 @@ scw k8s node replace [arg=value ...] | Name | | Description | |------|---|-------------| -| node-id | Required | The ID of the node to replace | +| node-id | Required | ID of the node to replace | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Replace a given node +Replace a node ``` scw k8s node replace 11111111-1111-1111-111111111111 ``` @@ -737,15 +735,13 @@ scw k8s node wait 11111111-1111-1111-1111-111111111111 ## Kapsule pool management commands -A pool is a set of identical Nodes. A pool has a name, a size (its current number of nodes), nodes number limits (min, max) and a Scaleway instance type. -Changing those limits increases/decreases the size of a pool. Thus, when autoscaling is enabled, the pool will grow or shrink inside those limits, depending on its load. -A "default pool" is automatically created with every cluster. +A pool is a set of identical nodes. A pool has a name, a size (its current number of nodes), node number limits (min, max), and a Scaleway Instance type. Changing those limits increases/decreases the size of a pool. Thus, the pool will grow or shrink inside those limits when autoscaling is enabled, depending on its load. A "default pool" is automatically created with every cluster. ### Create a new pool in a cluster -This method allows to create a new pool in a specific Kubernetes cluster. +Create a new pool in a specific Kubernetes cluster. **Usage:** @@ -758,40 +754,40 @@ scw k8s pool create [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster in which the pool will be created | -| name | Required
Default: `` | The name of the pool | -| node-type | Required
Default: `DEV1-M` | The node type is the type of Scaleway Instance wanted for the pool | -| placement-group-id | | The placement group ID in which all the nodes of the pool will be created | -| autoscaling | | The enablement of the autoscaling feature for the pool | -| size | Required
Default: `1` | The size (number of nodes) of the pool | -| min-size | | The minimum size of the pool | -| max-size | | The maximum size of the pool | -| container-runtime | One of: `unknown_runtime`, `docker`, `containerd`, `crio` | The container runtime for the nodes of the pool | -| autohealing | | The enablement of the autohealing feature for the pool | -| tags.{index} | | The tags associated with the pool | -| kubelet-args.{key} | | The Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental | +| cluster-id | Required | ID of the cluster in which the pool will be created | +| name | Required
Default: `` | Name of the pool | +| node-type | Required
Default: `DEV1-M` | Node type is the type of Scaleway Instance wanted for the pool | +| placement-group-id | | Placement group ID in which all the nodes of the pool will be created | +| autoscaling | | Defines whether the autoscaling feature is enabled for the pool | +| size | Required
Default: `1` | Size (number of nodes) of the pool | +| min-size | | Minimum size of the pool | +| max-size | | Maximum size of the pool | +| container-runtime | One of: `unknown_runtime`, `docker`, `containerd`, `crio` | Container runtime for the nodes of the pool | +| autohealing | | Defines whether the autohealing feature is enabled for the pool | +| tags.{index} | | Tags associated with the pool | +| kubelet-args.{key} | | Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental | | upgrade-policy.max-unavailable | | | | upgrade-policy.max-surge | | | -| zone | | The Zone in which the Pool's node will be spawn in | -| root-volume-type | One of: `default_volume_type`, `l_ssd`, `b_ssd` | The system volume disk type | -| root-volume-size | | The system volume disk size | +| zone | | Zone in which the pool's nodes will be spawned | +| root-volume-type | One of: `default_volume_type`, `l_ssd`, `b_ssd` | System volume disk type | +| root-volume-size | | System volume disk size | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Create a pool named bar with 2 DEV1-XL on a given cluster +Create a pool named bar with 2 DEV1-XL on a cluster ``` scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=bar node-type=DEV1-XL size=2 ``` -Create a pool named fish with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a given cluster +Create a pool named 'fish' with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a cluster ``` scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=fish node-type=GP1-L size=5 min-size=0 max-size=10 autoscaling=true autohealing=true container-runtime=containerd ``` -Create a tagged pool named turtle with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the pool on a given cluster +Create a tagged pool named 'turtle' with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the pool on a cluster ``` scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=turtle node-type=GP1-S size=1 placement-group-id=2222222222222-2222-222222222222 tags.0=turtle tags.1=placement-group ``` @@ -801,7 +797,7 @@ scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=turtle node- ### Delete a pool in a cluster -This method allows to delete a specific pool from a cluster, deleting all the nodes associated with it. +Delete a specific pool from a cluster. All of the pool's nodes will also be deleted. **Usage:** @@ -814,14 +810,14 @@ scw k8s pool delete [arg=value ...] | Name | | Description | |------|---|-------------| -| pool-id | Required | The ID of the pool to delete | +| pool-id | Required | ID of the pool to delete | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Delete a given pool +Delete a specific pool ``` scw k8s pool delete 11111111-1111-1111-111111111111 ``` @@ -831,7 +827,7 @@ scw k8s pool delete 11111111-1111-1111-111111111111 ### Get a pool in a cluster -This method allows to get details about a specific pool. +Get details about a specific pool in a Kubernetes cluster. **Usage:** @@ -844,7 +840,7 @@ scw k8s pool get [arg=value ...] | Name | | Description | |------|---|-------------| -| pool-id | Required | The ID of the requested pool | +| pool-id | Required | ID of the requested pool | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -861,7 +857,7 @@ scw k8s pool get 11111111-1111-1111-111111111111 ### List all the pools in a cluster -This method allows to list all the existing pools for a specific Kubernetes cluster. +List all the existing pools for a specific Kubernetes cluster. **Usage:** @@ -874,32 +870,32 @@ scw k8s pool list [arg=value ...] | Name | | Description | |------|---|-------------| -| cluster-id | Required | The ID of the cluster from which the pools will be listed from | -| order-by | One of: `created_at_asc`, `created_at_desc`, `updated_at_asc`, `updated_at_desc`, `name_asc`, `name_desc`, `status_asc`, `status_desc`, `version_asc`, `version_desc` | The sort order of the returned pools | -| name | | The name on which to filter the returned pools | -| status | One of: `unknown`, `ready`, `deleting`, `deleted`, `scaling`, `warning`, `locked`, `upgrading` | The status on which to filter the returned pools | +| cluster-id | Required | ID of the cluster from which the pools will be listed from | +| order-by | One of: `created_at_asc`, `created_at_desc`, `updated_at_asc`, `updated_at_desc`, `name_asc`, `name_desc`, `status_asc`, `status_desc`, `version_asc`, `version_desc` | Sort order of the returned pools | +| name | | Name on which to filter the returned pools | +| status | One of: `unknown`, `ready`, `deleting`, `deleted`, `scaling`, `warning`, `locked`, `upgrading` | Status on which to filter the returned pools | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw`, `all` | Region to target. If none is passed will use default region from the config | **Examples:** -List all pools for a given cluster +List all pools for a cluster ``` scw k8s pool list cluster-id=11111111-1111-1111-111111111111 ``` -List all scaling pools for a given cluster +List all scaling pools for a cluster ``` scw k8s pool list cluster-id=11111111-1111-1111-111111111111 status=scaling ``` -List all pools for a given cluster that contain the word foo in the pool name +List all pools for a cluster that contains the word 'foo' in the pool name ``` scw k8s pool list cluster-id=11111111-1111-1111-111111111111 name=foo ``` -List all pools for a given cluster and order them by ascending creation date +List all pools for a cluster and order them by ascending creation date ``` scw k8s pool list cluster-id=11111111-1111-1111-111111111111 order-by=created_at_asc ``` @@ -909,7 +905,7 @@ scw k8s pool list cluster-id=11111111-1111-1111-111111111111 order-by=created_at ### Update a pool in a cluster -This method allows to update some attributes of a specific pool such as the size, the autoscaling enablement, the tags, ... +Update attributes of a specific pool, such as size, autoscaling settings, and tags. **Usage:** @@ -922,14 +918,14 @@ scw k8s pool update [arg=value ...] | Name | | Description | |------|---|-------------| -| pool-id | Required | The ID of the pool to update | -| autoscaling | | The new value for the enablement of autoscaling for the pool | -| size | | The new size for the pool | -| min-size | | The new minimun size for the pool | -| max-size | | The new maximum size for the pool | -| autohealing | | The new value for the enablement of autohealing for the pool | -| tags.{index} | | The new tags associated with the pool | -| kubelet-args.{key} | | The new Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental | +| pool-id | Required | ID of the pool to update | +| autoscaling | | New value for the enablement of autoscaling for the pool | +| size | | New size for the pool | +| min-size | | New minimum size for the pool | +| max-size | | New maximum size for the pool | +| autohealing | | New value for the enablement of autohealing for the pool | +| tags.{index} | | New tags associated with the pool | +| kubelet-args.{key} | | New Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental | | upgrade-policy.max-unavailable | | | | upgrade-policy.max-surge | | | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -958,7 +954,7 @@ scw k8s pool update 11111111-1111-1111-111111111111 tags.0=my tags.1=new tags.2= ### Upgrade a pool in a cluster -This method allows to upgrade the Kubernetes version of a specific pool. Note that this will work when the targeted version is the same than the version of the cluster. +Upgrade the Kubernetes version of a specific pool. Note that this will work when the targeted version is the same as the version of the cluster. **Usage:** scw k8s pool upgrade [arg=value ...] | Name | | Description | |------|---|-------------| -| pool-id | Required | The ID of the pool to upgrade | -| version | Required | The new Kubernetes version for the pool | +| pool-id | Required | ID of the pool to upgrade | +| version | Required | New Kubernetes version for the pool | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | **Examples:** -Upgrade a given pool to the Kubernetes version 1.24.7 +Upgrade a specific pool to the Kubernetes version 1.24.7 ``` scw k8s pool upgrade 11111111-1111-1111-111111111111 version=1.24.7 ``` @@ -1019,17 +1015,13 @@ scw k8s pool wait 11111111-1111-1111-1111-111111111111 ## Available Kubernetes version commands -A version is a vanilla Kubernetes version like `x.y.z`. -It is composed of a major version x, a minor version y and a patch version z. -Scaleway's managed Kubernetes, Kapsule, will at least support the last patch version for the last three minor release. - -Also each version have a different set of container runtimes, CNIs, ingresses, feature gates and admission plugins available. +A version is a vanilla Kubernetes version like `x.y.z`. It comprises a major version x, a minor version y, and a patch version z. Scaleway's managed Kubernetes, Kapsule, will support at minimum the last patch version for the last three minor releases. Also, each version has a different set of container runtimes, CNIs, ingresses, feature gates, and admission plugins available. ### Get details about a specific version -This method allows to get a specific Kubernetes version and the details about the version. +Get a specific Kubernetes version and the details about the version. **Usage:** @@ -1042,7 +1034,7 @@ scw k8s version get [arg=value ...] | Name | | Description | |------|---|-------------| -| version-name | Required | The requested version name | +| version-name | Required | Requested version name | | region | Default: `fr-par`
One of: `fr-par`, `nl-ams`, `pl-waw` | Region to target. If none is passed will use default region from the config | @@ -1059,7 +1051,7 @@ scw k8s version get 1.24.7 ### List all available versions -This method allows to list all available versions for the creation of a new Kubernetes cluster. +List all available versions for the creation of a new Kubernetes cluster. **Usage:** diff --git a/docs/commands/lb.md b/docs/commands/lb.md index 4461acd4f1..9780d8f43e 100644 --- a/docs/commands/lb.md +++ b/docs/commands/lb.md @@ -1,63 +1,63 @@ # Documentation for `scw lb` -This API allows you to manage your load balancer service. +This API allows you to manage your Scaleway Load Balancer services. - [Access Control List (ACL) management commands](#access-control-list-(acl)-management-commands) - [Create an ACL for a given frontend](#create-an-acl-for-a-given-frontend) - [Delete an ACL](#delete-an-acl) - [Get an ACL](#get-an-acl) - - [List ACL for a given frontend](#list-acl-for-a-given-frontend) - - [Set all ACLs for a given frontend](#set-all-acls-for-a-given-frontend) + - [List ACLs for a given frontend](#list-acls-for-a-given-frontend) + - [Define all ACLs for a given frontend](#define-all-acls-for-a-given-frontend) - [Update an ACL](#update-an-acl) - [Backend management commands](#backend-management-commands) - - [Add a set of servers in a given backend](#add-a-set-of-servers-in-a-given-backend) - - [Create a backend in a given load balancer](#create-a-backend-in-a-given-load-balancer) - - [Delete a backend in a given load balancer](#delete-a-backend-in-a-given-load-balancer) - - [Get a backend in a given load balancer](#get-a-backend-in-a-given-load-balancer) - - [List backends in a given load balancer](#list-backends-in-a-given-load-balancer) + - [Add a set of backend servers to a given backend](#add-a-set-of-backend-servers-to-a-given-backend) + - [Create a backend for a given Load Balancer](#create-a-backend-for-a-given-load-balancer) + - [Delete a backend of a 
given Load Balancer](#delete-a-backend-of-a-given-load-balancer) + - [Get a backend of a given Load Balancer](#get-a-backend-of-a-given-load-balancer) + - [List the backends of a given Load Balancer](#list-the-backends-of-a-given-load-balancer) - [Remove a set of servers for a given backend](#remove-a-set-of-servers-for-a-given-backend) - - [Define all servers in a given backend](#define-all-servers-in-a-given-backend) - - [Update a backend in a given load balancer](#update-a-backend-in-a-given-load-balancer) - - [Update an healthcheck for a given backend](#update-an-healthcheck-for-a-given-backend) + - [Define all backend servers for a given backend](#define-all-backend-servers-for-a-given-backend) + - [Update a backend of a given Load Balancer](#update-a-backend-of-a-given-load-balancer) + - [Update a health check for a given backend](#update-a-health-check-for-a-given-backend) - [TLS certificate management commands](#tls-certificate-management-commands) - - [Create a TLS certificate](#create-a-tls-certificate) - - [Delete a TLS certificate](#delete-a-tls-certificate) - - [Get a TLS certificate](#get-a-tls-certificate) - - [List all TLS certificates on a given load balancer](#list-all-tls-certificates-on-a-given-load-balancer) - - [Update a TLS certificate](#update-a-tls-certificate) + - [Create an SSL/TLS certificate](#create-an-ssltls-certificate) + - [Delete an SSL/TLS certificate](#delete-an-ssltls-certificate) + - [Get an SSL/TLS certificate](#get-an-ssltls-certificate) + - [List all SSL/TLS certificates on a given Load Balancer](#list-all-ssltls-certificates-on-a-given-load-balancer) + - [Update an SSL/TLS certificate](#update-an-ssltls-certificate) - [Frontend management commands](#frontend-management-commands) - - [Create a frontend in a given load balancer](#create-a-frontend-in-a-given-load-balancer) + - [Create a frontend in a given Load Balancer](#create-a-frontend-in-a-given-load-balancer) - [Delete a frontend](#delete-a-frontend) - [Get a 
frontend](#get-a-frontend) - - [List frontends in a given load balancer](#list-frontends-in-a-given-load-balancer) + - [List frontends of a given Load Balancer](#list-frontends-of-a-given-load-balancer) - [Update a frontend](#update-a-frontend) - [IP management commands](#ip-management-commands) - - [Create an IP](#create-an-ip) - - [Delete an IP](#delete-an-ip) - - [Get an IP](#get-an-ip) - - [List IPs](#list-ips) - - [Update an IP](#update-an-ip) + - [Create an IP address](#create-an-ip-address) + - [Delete an IP address](#delete-an-ip-address) + - [Get an IP address](#get-an-ip-address) + - [List IP addresses](#list-ip-addresses) + - [Update an IP address](#update-an-ip-address) - [Load balancer management commands](#load-balancer-management-commands) - - [Create a load balancer](#create-a-load-balancer) - - [Delete a load balancer](#delete-a-load-balancer) - - [Get a load balancer](#get-a-load-balancer) - - [Get usage statistics of a given load balancer](#get-usage-statistics-of-a-given-load-balancer) - - [List load balancers](#list-load-balancers) - - [Migrate a load balancer](#migrate-a-load-balancer) - - [Update a load balancer](#update-a-load-balancer) + - [Create a Load Balancer](#create-a-load-balancer) + - [Delete a Load Balancer](#delete-a-load-balancer) + - [Get a Load Balancer](#get-a-load-balancer) + - [Get usage statistics of a given Load Balancer](#get-usage-statistics-of-a-given-load-balancer) + - [List Load Balancers](#list-load-balancers) + - [Migrate a Load Balancer](#migrate-a-load-balancer) + - [Update a Load Balancer](#update-a-load-balancer) - [Wait for a load balancer to reach a stable state](#wait-for-a-load-balancer-to-reach-a-stable-state) - [Load balancer types management commands](#load-balancer-types-management-commands) - - [List all load balancer offer type](#list-all-load-balancer-offer-type) + - [List all Load Balancer offer types](#list-all-load-balancer-offer-types) - [Private networks management 
commands](#private-networks-management-commands) - - [Add load balancer on instance private network](#add-load-balancer-on-instance-private-network) - - [Remove load balancer of private network](#remove-load-balancer-of-private-network) - - [List attached private network of load balancer](#list-attached-private-network-of-load-balancer) + - [Attach a Load Balancer to a Private Network](#attach-a-load-balancer-to-a-private-network) + - [Detach Load Balancer from Private Network](#detach-load-balancer-from-private-network) + - [List Private Networks attached to a Load Balancer](#list-private-networks-attached-to-a-load-balancer) - [Route rules management commands](#route-rules-management-commands) - - [Create a backend redirection](#create-a-backend-redirection) - - [Delete a backend redirection](#delete-a-backend-redirection) - - [Get single backend redirection](#get-single-backend-redirection) - - [List all backend redirections](#list-all-backend-redirections) - - [Edit a backend redirection](#edit-a-backend-redirection) + - [Create a route](#create-a-route) + - [Delete a route](#delete-a-route) + - [Get a route](#get-a-route) + - [List all routes](#list-all-routes) + - [Update a route](#update-a-route) ## Access Control List (ACL) management commands @@ -67,7 +67,7 @@ Access Control List (ACL) management commands. ### Create an ACL for a given frontend -Create an ACL for a given frontend. +Create a new ACL for a given frontend. Each ACL must have a name, an action to perform (allow or deny), and a match rule (the action is carried out when the incoming traffic matches the rule). **Usage:** @@ -80,26 +80,26 @@ scw lb acl create [arg=value ...] | Name | | Description | |------|---|-------------| -| frontend-id | Required | ID of your frontend | -| name | Required
Default: `` | Name of your ACL ressource | -| action.type | One of: `allow`, `deny`, `redirect` | The action type | +| frontend-id | Required | Frontend ID to attach the ACL to | +| name | Required
Default: `` | ACL name | +| action.type | One of: `allow`, `deny`, `redirect` | Action to take when incoming traffic matches an ACL filter | | action.redirect.type | One of: `location`, `scheme` | Redirect type | -| action.redirect.target | | Redirect target (target URL for `location`, or target `scheme`) | +| action.redirect.target | | Redirect target. For a location redirect, you can use a URL e.g. `https://scaleway.com`. Using a scheme name (e.g. `https`, `http`, `ftp`, `git`) will replace the request's original scheme. This can be useful to implement HTTP to HTTPS redirects. Valid placeholders that can be used in a `location` redirect to preserve parts of the original request in the redirection URL are {{ host }}, {{ query }}, {{ path }} and {{ scheme }} | | action.redirect.code | | HTTP redirect code to use. Valid values are 301, 302, 303, 307 and 308. Default value is 302 | -| match.ip-subnet.{index} | | A list of IPs or CIDR v4/v6 addresses of the client of the session to match | -| match.http-filter | One of: `acl_http_filter_none`, `path_begin`, `path_end`, `regex`, `http_header_match` | The HTTP filter to match | -| match.http-filter-value.{index} | | A list of possible values to match for the given HTTP filter | -| match.http-filter-option | | A exra parameter. You can use this field with http_header_match acl type to set the header name to filter | -| match.invert | | If set to `true`, the ACL matching condition will be of type "UNLESS" | -| index | Required | Order between your Acls (ascending order, 0 is first acl executed) | -| description | | Description of your ACL ressource | +| match.ip-subnet.{index} | | List of IPs or CIDR v4/v6 addresses to filter for from the client side | +| match.http-filter | One of: `acl_http_filter_none`, `path_begin`, `path_end`, `regex`, `http_header_match` | Type of HTTP filter to match. Extracts the request's URL path, which starts at the first slash and ends before the question mark (without the host part). 
Defines where to filter for the http_filter_value. Only supported for HTTP backends | +| match.http-filter-value.{index} | | List of values to filter for | +| match.http-filter-option | | Name of the HTTP header to filter on if `http_header_match` was selected in `http_filter` | +| match.invert | | Defines whether to invert the match condition. If set to `true`, the ACL carries out its action when the condition DOES NOT match | +| index | Required | Priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed) | +| description | | ACL description | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | ### Delete an ACL -Delete an ACL. +Delete an ACL, specified by its ACL ID. Deleting an ACL is irreversible and cannot be undone. **Usage:** @@ -112,14 +112,14 @@ scw lb acl delete [arg=value ...] | Name | | Description | |------|---|-------------| -| acl-id | Required | ID of your ACL ressource | +| acl-id | Required | ACL ID | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | ### Get an ACL -Get an ACL. +Get information for a particular ACL, specified by its ACL ID. The response returns full details of the ACL, including its name, action, match rule and frontend. **Usage:** @@ -132,14 +132,14 @@ scw lb acl get [arg=value ...] | Name | | Description | |------|---|-------------| -| acl-id | Required | ID of your ACL ressource | +| acl-id | Required | ACL ID | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### List ACL for a given frontend +### List ACLs for a given frontend -List ACL for a given frontend. +List the ACLs for a given frontend, specified by its frontend ID. The response is an array of ACL objects, each one representing an ACL that denies or allows traffic based on certain conditions. **Usage:** @@ -152,16 +152,16 @@ scw lb acl list [arg=value ...] | Name | | Description | |------|---|-------------| -| frontend-id | Required | ID of your frontend | -| order-by | One of: `created_at_asc`, `created_at_desc`, `name_asc`, `name_desc` | Response order | -| name | | Filter acl per name | +| frontend-id | Required | Frontend ID (ACLs attached to this frontend will be returned in the response) | +| order-by | One of: `created_at_asc`, `created_at_desc`, `name_asc`, `name_desc` | Sort order of ACLs in the response | +| name | | ACL name to filter for | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2`, `all` | Zone to target. If none is passed will use default zone from the config | -### Set all ACLs for a given frontend +### Define all ACLs for a given frontend -Set all ACLs for a given frontend. +For a given frontend specified by its frontend ID, define and add the complete set of ACLs for that frontend. Any existing ACLs on this frontend will be removed. **Usage:** @@ -174,26 +174,26 @@ scw lb acl set [arg=value ...] | Name | | Description | |------|---|-------------| -| acls.{index}.name | Required | Name of your ACL resource | -| acls.{index}.action.type | One of: `allow`, `deny`, `redirect` | The action type | +| acls.{index}.name | Required | ACL name | +| acls.{index}.action.type | One of: `allow`, `deny`, `redirect` | Action to take when incoming traffic matches an ACL filter | | acls.{index}.action.redirect.type | One of: `location`, `scheme` | Redirect type | -| acls.{index}.action.redirect.target | | Redirect target (target URL for `location`, or target `scheme`) | +| acls.{index}.action.redirect.target | | Redirect target. For a location redirect, you can use a URL e.g. `https://scaleway.com`. Using a scheme name (e.g. `https`, `http`, `ftp`, `git`) will replace the request's original scheme. This can be useful to implement HTTP to HTTPS redirects. Valid placeholders that can be used in a `location` redirect to preserve parts of the original request in the redirection URL are {{ host }}, {{ query }}, {{ path }} and {{ scheme }} | | acls.{index}.action.redirect.code | | HTTP redirect code to use. Valid values are 301, 302, 303, 307 and 308.
Default value is 302 | -| acls.{index}.match.ip-subnet.{index} | | A list of IPs or CIDR v4/v6 addresses of the client of the session to match | -| acls.{index}.match.http-filter | One of: `acl_http_filter_none`, `path_begin`, `path_end`, `regex`, `http_header_match` | The HTTP filter to match | -| acls.{index}.match.http-filter-value.{index} | | A list of possible values to match for the given HTTP filter | -| acls.{index}.match.http-filter-option | | A exra parameter. You can use this field with http_header_match acl type to set the header name to filter | -| acls.{index}.match.invert | | If set to `true`, the ACL matching condition will be of type "UNLESS" | -| acls.{index}.index | Required | Order between your Acls (ascending order, 0 is first acl executed) | -| acls.{index}.description | | Description of your ACL ressource | -| frontend-id | Required | The Frontend to change ACL to | +| acls.{index}.match.ip-subnet.{index} | | List of IPs or CIDR v4/v6 addresses to filter for from the client side | +| acls.{index}.match.http-filter | One of: `acl_http_filter_none`, `path_begin`, `path_end`, `regex`, `http_header_match` | Type of HTTP filter to match. Extracts the request's URL path, which starts at the first slash and ends before the question mark (without the host part). Defines where to filter for the http_filter_value. Only supported for HTTP backends | +| acls.{index}.match.http-filter-value.{index} | | List of values to filter for | +| acls.{index}.match.http-filter-option | | Name of the HTTP header to filter on if `http_header_match` was selected in `http_filter` | +| acls.{index}.match.invert | | Defines whether to invert the match condition. 
If set to `true`, the ACL carries out its action when the condition DOES NOT match | +| acls.{index}.index | Required | Priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed) | +| acls.{index}.description | | ACL description | +| frontend-id | Required | Frontend ID | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | ### Update an ACL -Update an ACL. +Update a particular ACL, specified by its ACL ID. You can update details including its name, action and match rule. **Usage:** @@ -206,19 +206,19 @@ scw lb acl update [arg=value ...] | Name | | Description | |------|---|-------------| -| acl-id | Required | ID of your ACL ressource | -| name | Required | Name of your ACL ressource | -| action.type | One of: `allow`, `deny`, `redirect` | The action type | +| acl-id | Required | ACL ID | +| name | Required | ACL name | +| action.type | One of: `allow`, `deny`, `redirect` | Action to take when incoming traffic matches an ACL filter | | action.redirect.type | One of: `location`, `scheme` | Redirect type | -| action.redirect.target | | Redirect target (target URL for `location`, or target `scheme`) | +| action.redirect.target | | Redirect target. For a location redirect, you can use a URL e.g. `https://scaleway.com`. Using a scheme name (e.g. `https`, `http`, `ftp`, `git`) will replace the request's original scheme. This can be useful to implement HTTP to HTTPS redirects. Valid placeholders that can be used in a `location` redirect to preserve parts of the original request in the redirection URL are {{ host }}, {{ query }}, {{ path }} and {{ scheme }} | | action.redirect.code | | HTTP redirect code to use. Valid values are 301, 302, 303, 307 and 308. Default value is 302 | -| match.ip-subnet.{index} | | A list of IPs or CIDR v4/v6 addresses of the client of the session to match | -| match.http-filter | One of: `acl_http_filter_none`, `path_begin`, `path_end`, `regex`, `http_header_match` | The HTTP filter to match | -| match.http-filter-value.{index} | | A list of possible values to match for the given HTTP filter | -| match.http-filter-option | | A exra parameter. 
You can use this field with http_header_match acl type to set the header name to filter | -| match.invert | | If set to `true`, the ACL matching condition will be of type "UNLESS" | -| index | Required | Order between your Acls (ascending order, 0 is first acl executed) | -| description | | Description of your ACL ressource | +| match.ip-subnet.{index} | | List of IPs or CIDR v4/v6 addresses to filter for from the client side | +| match.http-filter | One of: `acl_http_filter_none`, `path_begin`, `path_end`, `regex`, `http_header_match` | Type of HTTP filter to match. Extracts the request's URL path, which starts at the first slash and ends before the question mark (without the host part). Defines where to filter for the http_filter_value. Only supported for HTTP backends | +| match.http-filter-value.{index} | | List of values to filter for | +| match.http-filter-option | | Name of the HTTP header to filter on if `http_header_match` was selected in `http_filter` | +| match.invert | | Defines whether to invert the match condition. If set to `true`, the ACL carries out its action when the condition DOES NOT match | +| index | Required | Priority of this ACL (ACLs are applied in ascending order, 0 is the first ACL executed) | +| description | | ACL description | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | @@ -228,9 +228,9 @@ scw lb acl update [arg=value ...] Backend management commands. -### Add a set of servers in a given backend +### Add a set of backend servers to a given backend -Add a set of servers in a given backend. +For a given backend specified by its backend ID, add a set of backend servers (identified by their IP addresses) it should forward traffic to. These will be appended to any existing set of backend servers for this backend. **Usage:** @@ -249,14 +249,14 @@ scw lb backend add-servers [arg=value ...] | use-instance-server-public-ip | | Use public IP address of the instance instead of the private one | | baremetal-server-id.{index} | | UIID of the baremetal server. | | baremetal-server-tag.{index} | | Tag of the baremetal server. | -| server-ip.{index} | Required | Set all IPs to add on your backend | +| server-ip.{index} | Required | List of IP addresses to add to backend servers | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Create a backend in a given load balancer +### Create a backend for a given Load Balancer -Create a backend in a given load balancer. +Create a new backend for a given Load Balancer, specifying its full configuration including protocol, port and forwarding algorithm. **Usage:** @@ -269,51 +269,51 @@ scw lb backend create [arg=value ...] | Name | | Description | |------|---|-------------| -| name | Required
Default: `` | Resource name | -| forward-protocol | Required
One of: `tcp`, `http` | Backend protocol. TCP or HTTP | -| forward-port | Required | User sessions will be forwarded to this port of backend servers | -| forward-port-algorithm | Required
Default: `roundrobin`
One of: `roundrobin`, `leastconn`, `first` | Load balancing algorithm | -| sticky-sessions | Required
Default: `none`
One of: `none`, `cookie`, `table` | Enables cookie-based session persistence | -| sticky-sessions-cookie-name | | Cookie name for sticky sessions | +| name | Required
Default: `` | Name for the backend | +| forward-protocol | Required
One of: `tcp`, `http` | Protocol to be used by the backend when forwarding traffic to backend servers | +| forward-port | Required | Port to be used by the backend when forwarding traffic to backend servers | +| forward-port-algorithm | Required
Default: `roundrobin`
One of: `roundrobin`, `leastconn`, `first` | Load balancing algorithm to be used when determining which backend server to forward new traffic to | +| sticky-sessions | Required
Default: `none`
One of: `none`, `cookie`, `table` | Defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie to stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server | +| sticky-sessions-cookie-name | | Cookie name for cookie-based sticky sessions | | health-check.mysql-config.user | | | -| health-check.check-max-retries | | Number of consecutive unsuccessful health checks, after which the server will be considered dead | +| health-check.check-max-retries | | Number of consecutive unsuccessful health checks after which the server will be considered dead | | health-check.pgsql-config.user | | | -| health-check.http-config.uri | | HTTP uri used with the request | -| health-check.http-config.method | | HTTP method used with the request | -| health-check.http-config.code | | HTTP response code so the Healthcheck is considered successfull | -| health-check.http-config.host-header | | HTTP host header used with the request | -| health-check.https-config.uri | | HTTP uri used with the request | -| health-check.https-config.method | | HTTP method used with the request | -| health-check.https-config.code | | HTTP response code so the Healthcheck is considered successfull | -| health-check.https-config.host-header | | HTTP host header used with the request | -| health-check.https-config.sni | | Specifies the SNI to use to do health checks over SSL | -| health-check.port | | TCP port to use for the backend server health check | +| health-check.http-config.uri | | HTTP URI used for the health check | +| health-check.http-config.method | | HTTP method used for the health check | +| health-check.http-config.code | | HTTP response code expected for a successful health check | +| health-check.http-config.host-header | | HTTP host header used for the health check | +| 
health-check.https-config.uri | | HTTP URI used for the health check | +| health-check.https-config.method | | HTTP method used for the health check | +| health-check.https-config.code | | HTTP response code expected for a successful health check | +| health-check.https-config.host-header | | HTTP host header used for the health check | +| health-check.https-config.sni | | SNI used for SSL health checks | +| health-check.port | | Port to use for the backend server health check | | health-check.check-timeout | | Maximum time a backend server has to reply to the health check | -| health-check.check-delay | | Time between two consecutive health checks | -| health-check.check-send-proxy | | It defines whether the health check should be done considering the proxy protocol | -| lb-id | Required | Load balancer ID | +| health-check.check-delay | | Time to wait between two consecutive health checks | +| health-check.check-send-proxy | | Defines whether proxy protocol should be activated for the health check | +| lb-id | Required | Load Balancer ID | | instance-server-id.{index} | | UIID of the instance server. | | instance-server-tag.{index} | | Tag of the instance server. | | use-instance-server-public-ip | | Use public IP address of the instance instead of the private one | | baremetal-server-id.{index} | | UIID of the baremetal server. | | baremetal-server-tag.{index} | | Tag of the baremetal server. | -| server-ip.{index} | Required | Backend server IP addresses list (IPv4 or IPv6) | -| ~~send-proxy-v2~~ | Deprecated | Deprecated in favor of proxy_protocol field ! 
| -| timeout-server | | Maximum server connection inactivity time (allowed time the server has to process the request) | -| timeout-connect | | Maximum initial server connection establishment time | -| timeout-tunnel | | Maximum tunnel inactivity time after Websocket is established (take precedence over client and server timeout) | -| on-marked-down-action | One of: `on_marked_down_action_none`, `shutdown_sessions` | Modify what occurs when a backend server is marked down | -| proxy-protocol | One of: `proxy_protocol_unknown`, `proxy_protocol_none`, `proxy_protocol_v1`, `proxy_protocol_v2`, `proxy_protocol_v2_ssl`, `proxy_protocol_v2_ssl_cn` | PROXY protocol, forward client's address (must be supported by backend servers software) | -| failover-host | | Scaleway S3 bucket website to be served in case all backend servers are down | -| ssl-bridging | | Enable SSL between load balancer and backend servers | -| ignore-ssl-server-verify | | Set to true to ignore server certificate verification | +| server-ip.{index} | Required | List of backend server IP addresses (IPv4 or IPv6) the backend should forward traffic to | +| ~~send-proxy-v2~~ | Deprecated | Deprecated in favor of proxy_protocol field | +| timeout-server | | Maximum allowed time for a backend server to process a request | +| timeout-connect | | Maximum allowed time for establishing a connection to a backend server | +| timeout-tunnel | | Maximum allowed tunnel inactivity time after Websocket is established (takes precedence over client and server timeout) | +| on-marked-down-action | One of: `on_marked_down_action_none`, `shutdown_sessions` | Action to take when a backend server is marked as down | +| proxy-protocol | One of: `proxy_protocol_unknown`, `proxy_protocol_none`, `proxy_protocol_v1`, `proxy_protocol_v2`, `proxy_protocol_v2_ssl`, `proxy_protocol_v2_ssl_cn` | PROXY protocol to use between the Load Balancer and backend servers. 
Allows the backend servers to be informed of the client's real IP address. PROXY protocol must be supported by the backend servers' software | +| failover-host | | Scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. Do not include the scheme (e.g. https://) | +| ssl-bridging | | Defines whether to enable SSL between the Load Balancer and backend servers | +| ignore-ssl-server-verify | | Defines whether the server certificate verification should be ignored | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Delete a backend in a given load balancer +### Delete a backend of a given Load Balancer -Delete a backend in a given load balancer. +Delete a backend of a given Load Balancer, specified by its backend ID. This action is irreversible and cannot be undone. **Usage:** @@ -331,9 +331,9 @@ scw lb backend delete [arg=value ...] -### Get a backend in a given load balancer +### Get a backend of a given Load Balancer -Get a backend in a given load balancer. +Get the full details of a given backend, specified by its backend ID. The response contains the backend's full configuration parameters including protocol, port and forwarding algorithm. **Usage:** @@ -351,9 +351,9 @@ scw lb backend get [arg=value ...] -### List backends in a given load balancer +### List the backends of a given Load Balancer -List backends in a given load balancer. +List all the backends of a Load Balancer, specified by its Load Balancer ID. By default, results are returned in ascending order by the creation date of each backend. The response is an array of backend objects, containing full details of each one including their configuration parameters such as protocol, port and forwarding algorithm. **Usage:** @@ -366,16 +366,16 @@ scw lb backend list [arg=value ...] | Name | | Description | |------|---|-------------| -| lb-id | Required | Load balancer ID | -| name | | Use this to search by name | -| order-by | One of: `created_at_asc`, `created_at_desc`, `name_asc`, `name_desc` | Response order | +| lb-id | Required | Load Balancer ID | +| name | | Name of the backend to filter for | +| order-by | One of: `created_at_asc`, `created_at_desc`, `name_asc`, `name_desc` | Sort order of backends in the response | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2`, `all` | Zone to target. If none is passed will use default zone from the config | ### Remove a set of servers for a given backend -Remove a set of servers for a given backend. +For a given backend specified by its backend ID, remove the specified backend servers (identified by their IP addresses) so that it no longer forwards traffic to them. **Usage:** @@ -394,14 +394,14 @@ scw lb backend remove-servers [arg=value ...] | use-instance-server-public-ip | | Use public IP address of the instance instead of the private one | | baremetal-server-id.{index} | | UIID of the baremetal server. | | baremetal-server-tag.{index} | | Tag of the baremetal server. | -| server-ip.{index} | Required | Set all IPs to remove of your backend | +| server-ip.{index} | Required | List of IP addresses to remove from backend servers | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Define all servers in a given backend +### Define all backend servers for a given backend -Define all servers in a given backend. +For a given backend specified by its backend ID, define the set of backend servers (identified by their IP addresses) that it should forward traffic to. Any existing backend servers configured for this backend will be removed. **Usage:** @@ -420,14 +420,14 @@ scw lb backend set-servers [arg=value ...] | use-instance-server-public-ip | | Use public IP address of the instance instead of the private one | | baremetal-server-id.{index} | | UIID of the baremetal server. | | baremetal-server-tag.{index} | | Tag of the baremetal server. | -| server-ip.{index} | Required | Set all IPs to add on your backend and remove all other | +| server-ip.{index} | Required | List of IP addresses for backend servers. Any other existing backend servers will be removed | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Update a backend in a given load balancer +### Update a backend of a given Load Balancer -Update a backend in a given load balancer. +Update a backend of a given Load Balancer, specified by its backend ID. Note that the request type is PUT and not PATCH. You must set all parameters. **Usage:** @@ -440,29 +440,29 @@ scw lb backend update [arg=value ...] | Name | | Description | |------|---|-------------| -| backend-id | Required | Backend ID to update | -| name | Required | Resource name | -| forward-protocol | Required
One of: `tcp`, `http` | Backend protocol. TCP or HTTP | -| forward-port | Required | User sessions will be forwarded to this port of backend servers | -| forward-port-algorithm | Required
One of: `roundrobin`, `leastconn`, `first` | Load balancing algorithm | -| sticky-sessions | Required
One of: `none`, `cookie`, `table` | Enable cookie-based session persistence | -| sticky-sessions-cookie-name | | Cookie name for sticky sessions | -| ~~send-proxy-v2~~ | Deprecated | Deprecated in favor of proxy_protocol field! | -| timeout-server | | Maximum server connection inactivity time (allowed time the server has to process the request) | -| timeout-connect | | Maximum initial server connection establishment time | -| timeout-tunnel | | Maximum tunnel inactivity time after Websocket is established (take precedence over client and server timeout) | -| on-marked-down-action | One of: `on_marked_down_action_none`, `shutdown_sessions` | Modify what occurs when a backend server is marked down | -| proxy-protocol | One of: `proxy_protocol_unknown`, `proxy_protocol_none`, `proxy_protocol_v1`, `proxy_protocol_v2`, `proxy_protocol_v2_ssl`, `proxy_protocol_v2_ssl_cn` | PROXY protocol, forward client's address (must be supported by backend servers software) | -| failover-host | | Scaleway S3 bucket website to be served in case all backend servers are down | -| ssl-bridging | | Enable SSL between load balancer and backend servers | -| ignore-ssl-server-verify | | Set to true to ignore server certificate verification | +| backend-id | Required | Backend ID | +| name | Required | Backend name | +| forward-protocol | Required
One of: `tcp`, `http` | Protocol to be used by the backend when forwarding traffic to backend servers | +| forward-port | Required | Port to be used by the backend when forwarding traffic to backend servers | +| forward-port-algorithm | Required
One of: `roundrobin`, `leastconn`, `first` | Load balancing algorithm to be used when determining which backend server to forward new traffic to | +| sticky-sessions | Required
One of: `none`, `cookie`, `table` | Defines whether to activate sticky sessions (binding a particular session to a particular backend server) and the method to use if so. None disables sticky sessions. Cookie-based uses an HTTP cookie to stick a session to a backend server. Table-based uses the source (client) IP address to stick a session to a backend server | +| sticky-sessions-cookie-name | | Cookie name for cookie-based sticky sessions | +| ~~send-proxy-v2~~ | Deprecated | Deprecated in favor of proxy_protocol field | +| timeout-server | | Maximum allowed time for a backend server to process a request | +| timeout-connect | | Maximum allowed time for establishing a connection to a backend server | +| timeout-tunnel | | Maximum allowed tunnel inactivity time after WebSocket is established (takes precedence over client and server timeout) | +| on-marked-down-action | One of: `on_marked_down_action_none`, `shutdown_sessions` | Action to take when a backend server is marked down | +| proxy-protocol | One of: `proxy_protocol_unknown`, `proxy_protocol_none`, `proxy_protocol_v1`, `proxy_protocol_v2`, `proxy_protocol_v2_ssl`, `proxy_protocol_v2_ssl_cn` | PROXY protocol to use between the Load Balancer and backend servers. Allows the backend servers to be informed of the client's real IP address. PROXY protocol must be supported by the backend servers' software | +| failover-host | | Scaleway S3 bucket website to be served as failover if all backend servers are down, e.g. failover-website.s3-website.fr-par.scw.cloud. Do not include the scheme (e.g. https://) | +| ssl-bridging | | Defines whether to enable SSL bridging between the Load Balancer and backend servers | +| ignore-ssl-server-verify | | Defines whether the server certificate verification should be ignored | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Update an healthcheck for a given backend +### Update a health check for a given backend -Update an healthcheck for a given backend. +Update the configuration of the health check performed by a given backend to verify the health of its backend servers, identified by its backend ID. Note that the request type is PUT and not PATCH. You must set all parameters. **Usage:** @@ -475,23 +475,23 @@ scw lb backend update-healthcheck [arg=value ...] | Name | | Description | |------|---|-------------| -| port | Required | Specify the port used to health check | -| check-delay | Required | Time between two consecutive health checks | +| port | Required | Port to use for the backend server health check | +| check-delay | Required | Time to wait between two consecutive health checks | | check-timeout | Required | Maximum time a backend server has to reply to the health check | | check-max-retries | Required | Number of consecutive unsuccessful health checks, after which the server will be considered dead | | backend-id | Required | Backend ID | | mysql-config.user | | | | pgsql-config.user | | | -| http-config.uri | | HTTP uri used with the request | -| http-config.method | | HTTP method used with the request | -| http-config.code | | HTTP response code so the Healthcheck is considered successfull | -| http-config.host-header | | HTTP host header used with the request | -| https-config.uri | | HTTP uri used with the request | -| https-config.method | | HTTP method used with the request | -| https-config.code | | HTTP response code so the Healthcheck is considered successfull | -| https-config.host-header | | HTTP host header used with the request | -| https-config.sni | | Specifies the SNI to use to do health checks over SSL | -| check-send-proxy | | It defines whether the health check should be done considering the proxy 
protocol | +| http-config.uri | | HTTP URI used for the health check | +| http-config.method | | HTTP method used for the health check | +| http-config.code | | HTTP response code expected for a successful health check | +| http-config.host-header | | HTTP host header used for the health check | +| https-config.uri | | HTTP URI used for the health check | +| https-config.method | | HTTP method used for the health check | +| https-config.code | | HTTP response code expected for a successful health check | +| https-config.host-header | | HTTP host header used for the health check | +| https-config.sni | | SNI used for SSL health checks | +| check-send-proxy | | Defines whether proxy protocol should be activated for the health check | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | @@ -501,9 +501,9 @@ scw lb backend update-healthcheck [arg=value ...] TLS certificate management commands. -### Create a TLS certificate +### Create an SSL/TLS certificate -Generate a new TLS certificate using Let's Encrypt or import your certificate. +Generate a new SSL/TLS certificate for a given Load Balancer. You can choose to create a Let's Encrypt certificate, or import a custom certificate. **Usage:** @@ -516,18 +516,18 @@ scw lb certificate create [arg=value ...] | Name | | Description | |------|---|-------------| -| lb-id | Required | Load balancer ID | -| name | Required
Default: `` | Certificate name | -| letsencrypt-common-name | | Main domain name of certificate (make sure this domain exists and resolves to your load balancer HA IP) | -| letsencrypt-alternative-name.{index} | | Alternative domain names (make sure all domain names exists and resolves to your load balancer HA IP) | -| custom-certificate-chain | | The full PEM-formatted include an entire certificate chain including public key, private key, and optionally certificate authorities. | +| lb-id | Required | Load Balancer ID | +| name | Required
Default: `` | Name for the certificate | +| letsencrypt-common-name | | Main domain name of certificate (this domain must exist and resolve to your Load Balancer IP address) | +| letsencrypt-alternative-name.{index} | | Alternative domain names (all domain names must exist and resolve to your Load Balancer IP address) | +| custom-certificate-chain | | Full PEM-formatted certificate, consisting of the entire certificate chain including public key, private key, and (optionally) Certificate Authorities | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Delete a TLS certificate +### Delete an SSL/TLS certificate -Delete a TLS certificate. +Delete an SSL/TLS certificate, specified by its certificate ID. Deleting a certificate is irreversible and cannot be undone. **Usage:** @@ -545,9 +545,9 @@ scw lb certificate delete [arg=value ...] -### Get a TLS certificate +### Get an SSL/TLS certificate -Get a TLS certificate. +Get information for a particular SSL/TLS certificate, specified by its certificate ID. The response returns full details of the certificate, including its type, main domain name, and alternative domain names. **Usage:** @@ -565,9 +565,9 @@ scw lb certificate get [arg=value ...] -### List all TLS certificates on a given load balancer +### List all SSL/TLS certificates on a given Load Balancer -List all TLS certificates on a given load balancer. +List all the SSL/TLS certificates on a given Load Balancer. The response is an array of certificate objects, which are by default listed in ascending order of creation date. **Usage:** @@ -580,16 +580,16 @@ scw lb certificate list [arg=value ...] | Name | | Description | |------|---|-------------| -| lb-id | Required | Load balancer ID | -| order-by | One of: `created_at_asc`, `created_at_desc`, `name_asc`, `name_desc` | Response order | -| name | | Use this to search by name | +| lb-id | Required | Load Balancer ID | +| order-by | One of: `created_at_asc`, `created_at_desc`, `name_asc`, `name_desc` | Sort order of certificates in the response | +| name | | Certificate name to filter for, only certificates of this name will be returned | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2`, `all` | Zone to target. If none is passed will use default zone from the config | -### Update a TLS certificate +### Update an SSL/TLS certificate -Update a TLS certificate. +Update the name of a particular SSL/TLS certificate, specified by its certificate ID. **Usage:** @@ -613,9 +613,9 @@ scw lb certificate update [arg=value ...] Frontend management commands. -### Create a frontend in a given load balancer +### Create a frontend in a given Load Balancer -Create a frontend in a given load balancer. +Create a new frontend for a given Load Balancer, specifying its configuration including the port it should listen on and the backend to attach it to. **Usage:** @@ -628,21 +628,21 @@ scw lb frontend create [arg=value ...] | Name | | Description | |------|---|-------------| -| name | Required
Default: `` | Resource name | -| inbound-port | Required | TCP port to listen on the front side | -| lb-id | Required | Load balancer ID | -| backend-id | Required | Backend ID | -| timeout-client | | Set the maximum inactivity time on the client side | -| ~~certificate-id~~ | Deprecated | Certificate ID, deprecated in favor of certificate_ids array ! | -| certificate-ids.{index} | | List of certificate IDs to bind on the frontend | -| enable-http3 | | Activate HTTP 3 protocol (beta) | +| name | Required
Default: `` | Name for the frontend | +| inbound-port | Required | Port the frontend should listen on | +| lb-id | Required | Load Balancer ID (ID of the Load Balancer to attach the frontend to) | +| backend-id | Required | Backend ID (ID of the backend the frontend should pass traffic to) | +| timeout-client | | Maximum allowed inactivity time on the client side | +| ~~certificate-id~~ | Deprecated | Certificate ID, deprecated in favor of certificate_ids array | +| certificate-ids.{index} | | List of SSL/TLS certificate IDs to bind to the frontend | +| enable-http3 | | Defines whether to enable HTTP/3 protocol on the frontend | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | ### Delete a frontend -Delete a frontend. +Delete a given frontend, specified by its frontend ID. This action is irreversible and cannot be undone. **Usage:** @@ -655,14 +655,14 @@ scw lb frontend delete [arg=value ...] | Name | | Description | |------|---|-------------| -| frontend-id | Required | Frontend ID to delete | +| frontend-id | Required | ID of the frontend to delete | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | ### Get a frontend -Get a frontend. +Get the full details of a given frontend, specified by its frontend ID. The response contains the frontend's full configuration parameters including the backend it is attached to, the port it listens on, and any certificates it has. **Usage:** @@ -680,9 +680,9 @@ scw lb frontend get [arg=value ...] -### List frontends in a given load balancer +### List frontends of a given Load Balancer -List frontends in a given load balancer. +List all the frontends of a Load Balancer, specified by its Load Balancer ID. By default, results are returned in ascending order by the creation date of each frontend. The response is an array of frontend objects, containing full details of each one including the port they listen on and the backend they are attached to. **Usage:** @@ -695,16 +695,16 @@ scw lb frontend list [arg=value ...] | Name | | Description | |------|---|-------------| -| lb-id | Required | Load balancer ID | -| name | | Use this to search by name | -| order-by | One of: `created_at_asc`, `created_at_desc`, `name_asc`, `name_desc` | Response order | +| lb-id | Required | Load Balancer ID | +| name | | Name of the frontend to filter for | +| order-by | One of: `created_at_asc`, `created_at_desc`, `name_asc`, `name_desc` | Sort order of frontends in the response | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2`, `all` | Zone to target. If none is passed will use default zone from the config | ### Update a frontend -Update a frontend. +Update a given frontend, specified by its frontend ID. You can update configuration parameters including its name and the port it listens on. Note that the request type is PUT and not PATCH. You must set all parameters. **Usage:** @@ -718,13 +718,13 @@ scw lb frontend update [arg=value ...] | Name | | Description | |------|---|-------------| | frontend-id | Required | Frontend ID | -| name | Required | Resource name | -| inbound-port | Required | TCP port to listen on the front side | -| backend-id | Required | Backend ID | -| timeout-client | | Client session maximum inactivity time | -| ~~certificate-id~~ | Deprecated | Certificate ID, deprecated in favor of `certificate_ids` array! | -| certificate-ids.{index} | | List of certificate IDs to bind on the frontend | -| enable-http3 | | Activate HTTP 3 protocol (beta) | +| name | Required | Frontend name | +| inbound-port | Required | Port the frontend should listen on | +| backend-id | Required | Backend ID (ID of the backend the frontend should pass traffic to) | +| timeout-client | | Maximum allowed inactivity time on the client side | +| ~~certificate-id~~ | Deprecated | Certificate ID, deprecated in favor of certificate_ids array | +| certificate-ids.{index} | | List of SSL/TLS certificate IDs to bind to the frontend | +| enable-http3 | | Defines whether to enable HTTP/3 protocol on the frontend | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | @@ -734,9 +734,9 @@ scw lb frontend update [arg=value ...] IP management commands. -### Create an IP +### Create an IP address -Create an IP. +Create a new Load Balancer flexible IP address, in the specified Scaleway Project. This can be attached to new Load Balancers created in the future. **Usage:** @@ -750,15 +750,15 @@ scw lb ip create [arg=value ...] | Name | | Description | |------|---|-------------| | project-id | | Project ID to use. If none is passed the default project ID will be used | -| reverse | | Reverse domain name | +| reverse | | Reverse DNS (domain name) for the IP address | | organization-id | | Organization ID to use. If none is passed the default organization ID will be used | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Delete an IP +### Delete an IP address -Delete an IP. +Delete a Load Balancer flexible IP address. This action is irreversible, and cannot be undone. **Usage:** @@ -776,9 +776,9 @@ scw lb ip delete [arg=value ...] -### Get an IP +### Get an IP address -Get an IP. +Retrieve the full details of a Load Balancer flexible IP address. **Usage:** @@ -796,9 +796,9 @@ scw lb ip get [arg=value ...] -### List IPs +### List IP addresses -List IPs. +List the Load Balancer flexible IP addresses held in the account (filtered by Organization ID or Project ID). It is also possible to search for a specific IP address. **Usage:** @@ -811,16 +811,16 @@ scw lb ip list [arg=value ...] | Name | | Description | |------|---|-------------| -| ip-address | | Use this to search by IP address | -| project-id | | Filter IPs by project ID | -| organization-id | | Filter IPs by organization id | +| ip-address | | IP address to filter for | +| project-id | | Project ID to filter for, only Load Balancer IP addresses from this Project will be returned | +| organization-id | | Organization ID to filter for, only Load Balancer IP addresses from this Organization will be returned | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2`, `all` | Zone to target. If none is passed will use default zone from the config | -### Update an IP +### Update an IP address -Update an IP. +Update the reverse DNS of a Load Balancer flexible IP address. **Usage:** @@ -834,7 +834,7 @@ scw lb ip update [arg=value ...] | Name | | Description | |------|---|-------------| | ip-id | Required | IP address ID | -| reverse | | Reverse DNS | +| reverse | | Reverse DNS (domain name) for the IP address | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | @@ -844,9 +844,9 @@ scw lb ip update [arg=value ...] Load balancer management commands. -### Create a load balancer +### Create a Load Balancer -Create a load balancer. +Create a new Load Balancer. Note that the Load Balancer will be created without frontends or backends; these must be created separately via the dedicated endpoints. **Usage:** @@ -860,20 +860,20 @@ scw lb lb create [arg=value ...] | Name | | Description | |------|---|-------------| | project-id | | Project ID to use. If none is passed the default project ID will be used | -| name | Required
Default: `` | Resource names | -| description | | Resource description | -| ip-id | | Just like for compute instances, when you destroy a load balancer, you can keep its highly available IP address and reuse it for another load balancer later | -| tags.{index} | | List of keyword | -| type | Default: `LB-S`
One of: `LB-S`, `LB-GP-M`, `LB-GP-L` | Load balancer offer type | -| ssl-compatibility-level | One of: `ssl_compatibility_level_unknown`, `ssl_compatibility_level_intermediate`, `ssl_compatibility_level_modern`, `ssl_compatibility_level_old` | | +| name | Required
Default: `` | Name for the Load Balancer | +| description | | Description for the Load Balancer | +| ip-id | | ID of an existing flexible IP address to attach to the Load Balancer | +| tags.{index} | | List of tags for the Load Balancer | +| type | Default: `LB-S`
One of: `LB-S`, `LB-GP-M`, `LB-GP-L` | Load Balancer commercial offer type. Use the Load Balancer types endpoint to retrieve a list of available offer types | +| ssl-compatibility-level | One of: `ssl_compatibility_level_unknown`, `ssl_compatibility_level_intermediate`, `ssl_compatibility_level_modern`, `ssl_compatibility_level_old` | Determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and do not need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort | | organization-id | | Organization ID to use. If none is passed the default organization ID will be used | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Delete a load balancer +### Delete a Load Balancer -Delete a load balancer. +Delete an existing Load Balancer, specified by its Load Balancer ID. Deleting a Load Balancer is permanent, and cannot be undone. The Load Balancer's flexible IP address can either be deleted with the Load Balancer, or kept in your account for future use. **Usage:** @@ -886,15 +886,15 @@ scw lb lb delete [arg=value ...] | Name | | Description | |------|---|-------------| -| lb-id | Required | Load balancer ID | -| release-ip | | Set true if you don't want to keep this IP address | +| lb-id | Required | ID of the Load Balancer to delete | +| release-ip | | Defines whether the Load Balancer's flexible IP should be deleted. Set to true to release the flexible IP, or false to keep it available in your account for future Load Balancers | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Get a load balancer +### Get a Load Balancer -Get a load balancer. +Retrieve information about an existing Load Balancer, specified by its Load Balancer ID. Its full details, including name, status and IP address, are returned in the response object. **Usage:** @@ -907,14 +907,14 @@ scw lb lb get [arg=value ...] | Name | | Description | |------|---|-------------| -| lb-id | Required | Load balancer ID | +| lb-id | Required | Load Balancer ID | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Get usage statistics of a given load balancer +### Get usage statistics of a given Load Balancer -Get usage statistics of a given load balancer. +Get usage statistics of a given Load Balancer. **Usage:** @@ -927,14 +927,14 @@ scw lb lb get-stats [arg=value ...] | Name | | Description | |------|---|-------------| -| lb-id | Required | Load balancer ID | +| lb-id | Required | Load Balancer ID | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### List load balancers +### List Load Balancers -List load balancers. +List all Load Balancers in the specified zone, for a Scaleway Organization or Scaleway Project. By default, the Load Balancers returned in the list are ordered by creation date in ascending order, though this can be modified via the `order_by` field. **Usage:** @@ -947,17 +947,17 @@ scw lb lb list [arg=value ...] | Name | | Description | |------|---|-------------| -| name | | Use this to search by name | -| order-by | One of: `created_at_asc`, `created_at_desc`, `name_asc`, `name_desc` | Response order | -| project-id | | Filter LBs by project ID | -| organization-id | | Filter LBs by organization ID | +| name | | Load Balancer name to filter for | +| order-by | One of: `created_at_asc`, `created_at_desc`, `name_asc`, `name_desc` | Sort order of Load Balancers in the response | +| project-id | | Project ID to filter for, only Load Balancers from this Project will be returned | +| organization-id | | Organization ID to filter for, only Load Balancers from this Organization will be returned | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2`, `all` | Zone to target. If none is passed will use default zone from the config | -### Migrate a load balancer +### Migrate a Load Balancer -Migrate a load balancer. +Migrate an existing Load Balancer from one commercial type to another. Allows you to scale your Load Balancer up or down in terms of bandwidth or multi-cloud provision. **Usage:** @@ -970,15 +970,15 @@ scw lb lb migrate [arg=value ...] | Name | | Description | |------|---|-------------| -| lb-id | Required | Load balancer ID | -| type | Required
One of: `LB-S`, `LB-GP-M`, `LB-GP-L` | Load balancer type (check /lb-types to list all type) | +| lb-id | Required | Load Balancer ID | +| type | Required
One of: `LB-S`, `LB-GP-M`, `LB-GP-L` | Load Balancer type to migrate to (use the List all Load Balancer offer types endpoint to get a list of available offer types) | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Update a load balancer +### Update a Load Balancer -Update a load balancer. +Update the parameters of an existing Load Balancer, specified by its Load Balancer ID. Note that the request type is PUT and not PATCH. You must set all parameters. **Usage:** @@ -991,11 +991,11 @@ scw lb lb update [arg=value ...] | Name | | Description | |------|---|-------------| -| lb-id | Required | Load balancer ID | -| name | Required | Resource name | -| description | Required | Resource description | -| tags.{index} | | List of keywords | -| ssl-compatibility-level | One of: `ssl_compatibility_level_unknown`, `ssl_compatibility_level_intermediate`, `ssl_compatibility_level_modern`, `ssl_compatibility_level_old` | | +| lb-id | Required | Load Balancer ID | +| name | Required | Load Balancer name | +| description | Required | Load Balancer description | +| tags.{index} | | List of tags for the Load Balancer | +| ssl-compatibility-level | One of: `ssl_compatibility_level_unknown`, `ssl_compatibility_level_intermediate`, `ssl_compatibility_level_modern`, `ssl_compatibility_level_old` | Determines the minimal SSL version which needs to be supported on the client side, in an SSL/TLS offloading context. Intermediate is suitable for general-purpose servers with a variety of clients, recommended for almost all systems. Modern is suitable for services with clients that support TLS 1.3 and don't need backward compatibility. Old is compatible with a small number of very old clients and should be used only as a last resort | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | @@ -1035,9 +1035,9 @@ scw lb lb wait 11111111-1111-1111-1111-111111111111 Load balancer types management commands. -### List all load balancer offer type +### List all Load Balancer offer types -List all load balancer offer type. +List all the different commercial Load Balancer types. The response includes an array of offer types, each with a name, description, and information about its stock availability. **Usage:** @@ -1059,9 +1059,9 @@ scw lb lb-types list [arg=value ...] Private networks management commands. -### Add load balancer on instance private network +### Attach a Load Balancer to a Private Network -Add load balancer on instance private network. +Attach a specified Load Balancer to a specified Private Network, defining a static or DHCP configuration for the Load Balancer on the network. **Usage:** @@ -1074,16 +1074,16 @@ scw lb private-network attach [arg=value ...] | Name | | Description | |------|---|-------------| -| lb-id | Required | Load balancer ID | -| private-network-id | Required | Set your instance private network id | +| lb-id | Required | Load Balancer ID | +| private-network-id | Required | Private Network ID | | static-config.ip-address.{index} | | | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Remove load balancer of private network +### Detach Load Balancer from Private Network -Remove load balancer of private network. +Detach a specified Load Balancer from a specified Private Network. **Usage:** @@ -1102,9 +1102,9 @@ scw lb private-network detach [arg=value ...] -### List attached private network of load balancer +### List Private Networks attached to a Load Balancer -List attached private network of load balancer. +List the Private Networks attached to a given Load Balancer, specified by its Load Balancer ID. The response is an array of Private Network objects, giving information including the status, configuration, name and creation date of each Private Network. **Usage:** @@ -1117,8 +1117,8 @@ scw lb private-network list [arg=value ...] | Name | | Description | |------|---|-------------| -| order-by | One of: `created_at_asc`, `created_at_desc` | Response order | -| lb-id | Required | Load balancer ID | +| order-by | One of: `created_at_asc`, `created_at_desc` | Sort order of Private Network objects in the response | +| lb-id | Required | Load Balancer ID | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2`, `all` | Zone to target. If none is passed will use default zone from the config | @@ -1128,9 +1128,9 @@ scw lb private-network list [arg=value ...] Route rules management commands. -### Create a backend redirection +### Create a route -Create a backend redirection. +Create a new route on a given frontend. To configure a route, specify the backend to direct to if a certain condition is matched (based on the value of the SNI field or HTTP Host header). **Usage:** @@ -1143,17 +1143,17 @@ scw lb route create [arg=value ...] | Name | | Description | |------|---|-------------| -| frontend-id | | Origin of redirection | -| backend-id | | Destination of destination | -| match.sni | | Server Name Indication TLS extension (SNI) | +| frontend-id | | ID of the source frontend to create the route on | +| backend-id | | ID of the target backend for the route | +| match.sni | | Server Name Indication (SNI) value to match | | match.host-header | | HTTP host header to match | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Delete a backend redirection +### Delete a route -Delete a backend redirection. +Delete an existing route, specified by its route ID. Deleting a route is permanent, and cannot be undone. **Usage:** @@ -1166,14 +1166,14 @@ scw lb route delete [arg=value ...] | Name | | Description | |------|---|-------------| -| route-id | Required | Route id to delete | +| route-id | Required | Route ID | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### Get single backend redirection +### Get a route -Get single backend redirection. +Retrieve information about an existing route, specified by its route ID. Its full details, origin frontend, target backend and match condition, are returned in the response object. **Usage:** @@ -1186,14 +1186,14 @@ scw lb route get [arg=value ...] | Name | | Description | |------|---|-------------| -| route-id | Required | Id of route to get | +| route-id | Required | Route ID | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | -### List all backend redirections +### List all routes -List all backend redirections. +List all routes for a given frontend. The response is an array of routes, each one with a specified backend to direct to if a certain condition is matched (based on the value of the SNI field or HTTP Host header). **Usage:** @@ -1206,15 +1206,15 @@ scw lb route list [arg=value ...] | Name | | Description | |------|---|-------------| -| order-by | One of: `created_at_asc`, `created_at_desc` | Response order | +| order-by | One of: `created_at_asc`, `created_at_desc` | Sort order of routes in the response | | frontend-id | | | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2`, `all` | Zone to target. If none is passed will use default zone from the config | -### Edit a backend redirection +### Update a route -Edit a backend redirection. +Update the configuration of an existing route, specified by its route ID. **Usage:** @@ -1227,9 +1227,9 @@ scw lb route update [arg=value ...] | Name | | Description | |------|---|-------------| -| route-id | Required | Route id to update | -| backend-id | | Backend id of redirection | -| match.sni | | Server Name Indication TLS extension (SNI) | +| route-id | Required | Route ID | +| backend-id | | ID of the target backend for the route | +| match.sni | | Server Name Indication (SNI) value to match | | match.host-header | | HTTP host header to match | | zone | Default: `fr-par-1`
One of: `fr-par-1`, `fr-par-2`, `nl-ams-1`, `nl-ams-2`, `pl-waw-1`, `pl-waw-2` | Zone to target. If none is passed will use default zone from the config | diff --git a/internal/namespaces/k8s/v1/k8s_cli.go b/internal/namespaces/k8s/v1/k8s_cli.go index 39b5d38b9b..1ce5e12f1c 100644 --- a/internal/namespaces/k8s/v1/k8s_cli.go +++ b/internal/namespaces/k8s/v1/k8s_cli.go @@ -70,9 +70,7 @@ It is composed of different pools, each pool containing the same kind of nodes. func k8sPool() *core.Command { return &core.Command{ Short: `Kapsule pool management commands`, - Long: `A pool is a set of identical Nodes. A pool has a name, a size (its current number of nodes), nodes number limits (min, max) and a Scaleway instance type. -Changing those limits increases/decreases the size of a pool. Thus, when autoscaling is enabled, the pool will grow or shrink inside those limits, depending on its load. -A "default pool" is automatically created with every cluster. + Long: `A pool is a set of identical nodes. A pool has a name, a size (its current number of nodes), node number limits (min, max), and a Scaleway Instance type. Changing those limits increases/decreases the size of a pool. Thus, the pool will grow or shrink inside those limits when autoscaling is enabled, depending on its load. A "default pool" is automatically created with every cluster. `, Namespace: "k8s", Resource: "pool", @@ -82,9 +80,7 @@ A "default pool" is automatically created with every cluster. func k8sNode() *core.Command { return &core.Command{ Short: `Kapsule node management commands`, - Long: `A node (short for worker node) is an abstraction for a Scaleway Instance. -It is part of a pool and is instantiated by Scaleway, making Kubernetes software installed and configured automatically on it. -Please note that Kubernetes nodes cannot be accessed with ssh. + Long: `A node (short for worker node) is an abstraction for a Scaleway Instance. A node is always part of a pool. 
Each of them will have Kubernetes software automatically installed and configured by Scaleway. Please note that Kubernetes nodes cannot be accessed with SSH. `, Namespace: "k8s", Resource: "node", @@ -94,11 +90,7 @@ Please note that Kubernetes nodes cannot be accessed with ssh. func k8sVersion() *core.Command { return &core.Command{ Short: `Available Kubernetes version commands`, - Long: `A version is a vanilla Kubernetes version like ` + "`" + `x.y.z` + "`" + `. -It is composed of a major version x, a minor version y and a patch version z. -Scaleway's managed Kubernetes, Kapsule, will at least support the last patch version for the last three minor release. - -Also each version have a different set of container runtimes, CNIs, ingresses, feature gates and admission plugins available. + Long: `A version is a vanilla Kubernetes version like ` + "`" + `x.y.z` + "`" + `. It comprises a major version x, a minor version y, and a patch version z. Scaleway's managed Kubernetes, Kapsule, will support at minimum the last patch version for the last three minor releases. Also, each version has a different set of container runtimes, CNIs, ingresses, feature gates, and admission plugins available. 
`, Namespace: "k8s", Resource: "version", @@ -107,8 +99,8 @@ Also each version have a different set of container runtimes, CNIs, ingresses, f func k8sClusterList() *core.Command { return &core.Command{ - Short: `List all the clusters`, - Long: `This method allows to list all the existing Kubernetes clusters in an account.`, + Short: `List all clusters`, + Long: `List all the existing Kubernetes clusters in a specific Region.`, Namespace: "k8s", Resource: "cluster", Verb: "list", @@ -117,14 +109,14 @@ func k8sClusterList() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "project-id", - Short: `The project ID on which to filter the returned clusters`, + Short: `Project ID on which to filter the returned clusters`, Required: false, Deprecated: false, Positional: false, }, { Name: "order-by", - Short: `The sort order of the returned clusters`, + Short: `Sort order of the returned clusters`, Required: false, Deprecated: false, Positional: false, @@ -132,14 +124,14 @@ func k8sClusterList() *core.Command { }, { Name: "name", - Short: `The name on which to filter the returned clusters`, + Short: `Name on which to filter the returned clusters`, Required: false, Deprecated: false, Positional: false, }, { Name: "status", - Short: `The status on which to filter the returned clusters`, + Short: `Status on which to filter the returned clusters`, Required: false, Deprecated: false, Positional: false, @@ -147,14 +139,14 @@ func k8sClusterList() *core.Command { }, { Name: "type", - Short: `The type on which to filter the returned clusters`, + Short: `Type on which to filter the returned clusters`, Required: false, Deprecated: false, Positional: false, }, { Name: "organization-id", - Short: `The organization ID on which to filter the returned clusters`, + Short: `Organization ID on which to filter the returned clusters`, Required: false, Deprecated: false, Positional: false, @@ -180,7 +172,7 @@ func k8sClusterList() *core.Command { }, Examples: []*core.Example{ { - Short: "List all 
the clusters on your default region", + Short: "List all clusters on your default region", ArgsJSON: `null`, }, { @@ -239,7 +231,7 @@ func k8sClusterList() *core.Command { func k8sClusterCreate() *core.Command { return &core.Command{ Short: `Create a new cluster`, - Long: `This method allows to create a new Kubernetes cluster on an account.`, + Long: `Creates a new Kubernetes cluster on a Scaleway account.`, Namespace: "k8s", Resource: "cluster", Verb: "create", @@ -249,14 +241,14 @@ func k8sClusterCreate() *core.Command { core.ProjectIDArgSpec(), { Name: "type", - Short: `The type of the cluster`, + Short: `Type of the cluster`, Required: false, Deprecated: false, Positional: false, }, { Name: "name", - Short: `The name of the cluster`, + Short: `Name of the cluster`, Required: true, Deprecated: false, Positional: false, @@ -264,28 +256,28 @@ func k8sClusterCreate() *core.Command { }, { Name: "description", - Short: `The description of the cluster`, + Short: `Description of the cluster`, Required: false, Deprecated: false, Positional: false, }, { Name: "tags.{index}", - Short: `The tags associated with the cluster`, + Short: `Tags associated with the cluster`, Required: false, Deprecated: false, Positional: false, }, { Name: "version", - Short: `The Kubernetes version of the cluster`, + Short: `Kubernetes version of the cluster`, Required: true, Deprecated: false, Positional: false, }, { Name: "cni", - Short: `The Container Network Interface (CNI) plugin that will run in the cluster`, + Short: `Container Network Interface (CNI) plugin that will run in the cluster`, Required: true, Deprecated: false, Positional: false, @@ -293,14 +285,14 @@ func k8sClusterCreate() *core.Command { }, { Name: "enable-dashboard", - Short: `The enablement of the Kubernetes Dashboard in the cluster`, + Short: `Defines if the Kubernetes Dashboard is enabled in the cluster`, Required: false, Deprecated: true, Positional: false, }, { Name: "ingress", - Short: `The Ingress Controller that 
will run in the cluster`, + Short: `Ingress Controller that will run in the cluster`, Required: false, Deprecated: true, Positional: false, @@ -308,56 +300,56 @@ func k8sClusterCreate() *core.Command { }, { Name: "pools.{index}.name", - Short: `The name of the pool`, + Short: `Name of the pool`, Required: true, Deprecated: false, Positional: false, }, { Name: "pools.{index}.node-type", - Short: `The node type is the type of Scaleway Instance wanted for the pool`, + Short: `Node type is the type of Scaleway Instance wanted for the pool`, Required: true, Deprecated: false, Positional: false, }, { Name: "pools.{index}.placement-group-id", - Short: `The placement group ID in which all the nodes of the pool will be created`, + Short: `Placement group ID in which all the nodes of the pool will be created`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.autoscaling", - Short: `The enablement of the autoscaling feature for the pool`, + Short: `Defines whether the autoscaling feature is enabled for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.size", - Short: `The size (number of nodes) of the pool`, + Short: `Size (number of nodes) of the pool`, Required: true, Deprecated: false, Positional: false, }, { Name: "pools.{index}.min-size", - Short: `The minimum size of the pool`, + Short: `Minimum size of the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.max-size", - Short: `The maximum size of the pool`, + Short: `Maximum size of the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.container-runtime", - Short: `The container runtime for the nodes of the pool`, + Short: `Container runtime for the nodes of the pool`, Required: false, Deprecated: false, Positional: false, @@ -365,21 +357,21 @@ func k8sClusterCreate() *core.Command { }, { Name: "pools.{index}.autohealing", - Short: `The enablement of the autohealing 
feature for the pool`, + Short: `Defines whether the autohealing feature is enabled for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.tags.{index}", - Short: `The tags associated with the pool`, + Short: `Tags associated with the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.kubelet-args.{key}", - Short: `The Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental`, + Short: `Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental`, Required: false, Deprecated: false, Positional: false, @@ -400,14 +392,14 @@ func k8sClusterCreate() *core.Command { }, { Name: "pools.{index}.zone", - Short: `The Zone in which the Pool's node will be spawn in`, + Short: `Zone in which the pool's nodes will be spawned`, Required: false, Deprecated: false, Positional: false, }, { Name: "pools.{index}.root-volume-type", - Short: `The system volume disk type`, + Short: `System volume disk type`, Required: false, Deprecated: false, Positional: false, @@ -415,7 +407,7 @@ func k8sClusterCreate() *core.Command { }, { Name: "pools.{index}.root-volume-size", - Short: `The system volume disk size`, + Short: `System volume disk size`, Required: false, Deprecated: false, Positional: false, @@ -501,14 +493,14 @@ func k8sClusterCreate() *core.Command { }, { Name: "auto-upgrade.maintenance-window.start-hour", - Short: `The start hour of the 2-hour maintenance window`, + Short: `Start time of the two-hour maintenance window`, Required: false, Deprecated: false, Positional: false, }, { Name: "auto-upgrade.maintenance-window.day", - Short: `The day of the week for the maintenance window`, + Short: `Day of the week for the maintenance window`, Required: false, Deprecated: false, Positional: false, @@ -572,7 +564,7 @@ func k8sClusterCreate() *core.Command { }, { Name: "open-id-connect-config.required-claim.{index}", - Short: 
`Multiple key=value pairs that describes a required claim in the ID Token`, + Short: `Multiple key=value pairs that describe a required claim in the ID token`, Required: false, Deprecated: false, Positional: false, @@ -610,8 +602,8 @@ func k8sClusterCreate() *core.Command { func k8sClusterGet() *core.Command { return &core.Command{ - Short: `Get a cluster`, - Long: `This method allows to get details about a specific Kubernetes cluster.`, + Short: `Get specific cluster information`, + Long: `Get details about a specific Kubernetes cluster.`, Namespace: "k8s", Resource: "cluster", Verb: "get", @@ -637,7 +629,7 @@ func k8sClusterGet() *core.Command { }, Examples: []*core.Example{ { - Short: "Get a given cluster", + Short: "Get cluster information", Raw: `scw k8s cluster get 11111111-1111-1111-111111111111`, }, }, @@ -647,7 +639,7 @@ func k8sClusterGet() *core.Command { func k8sClusterUpdate() *core.Command { return &core.Command{ Short: `Update a cluster`, - Long: `This method allows to update a specific Kubernetes cluster. Note that this method is not made to upgrade a Kubernetes cluster.`, + Long: `Update a specific Kubernetes cluster. Note that this method is designed to update details such as name, description, tags and configuration. However, you cannot upgrade a cluster with this method. 
To do so, use the dedicated endpoint.`, Namespace: "k8s", Resource: "cluster", Verb: "update", @@ -656,28 +648,28 @@ func k8sClusterUpdate() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster to update`, + Short: `ID of the cluster to update`, Required: true, Deprecated: false, Positional: true, }, { Name: "name", - Short: `The new name of the cluster`, + Short: `New external name of the cluster`, Required: false, Deprecated: false, Positional: false, }, { Name: "description", - Short: `The new description of the cluster`, + Short: `New description of the cluster`, Required: false, Deprecated: false, Positional: false, }, { Name: "tags.{index}", - Short: `The new tags associated with the cluster`, + Short: `New tags associated with the cluster`, Required: false, Deprecated: false, Positional: false, @@ -756,14 +748,14 @@ func k8sClusterUpdate() *core.Command { }, { Name: "enable-dashboard", - Short: `The new value of the Kubernetes Dashboard enablement`, + Short: `New value of the Kubernetes Dashboard enablement`, Required: false, Deprecated: true, Positional: false, }, { Name: "ingress", - Short: `The new Ingress Controller for the cluster`, + Short: `New Ingress Controller for the cluster`, Required: false, Deprecated: true, Positional: false, @@ -778,14 +770,14 @@ func k8sClusterUpdate() *core.Command { }, { Name: "auto-upgrade.maintenance-window.start-hour", - Short: `The start hour of the 2-hour maintenance window`, + Short: `Start time of the two-hour maintenance window`, Required: false, Deprecated: false, Positional: false, }, { Name: "auto-upgrade.maintenance-window.day", - Short: `The day of the week for the maintenance window`, + Short: `Day of the week for the maintenance window`, Required: false, Deprecated: false, Positional: false, @@ -849,7 +841,7 @@ func k8sClusterUpdate() *core.Command { }, { Name: "open-id-connect-config.required-claim.{index}", - Short: `Multiple key=value pairs that describes a 
required claim in the ID Token`, + Short: `Multiple key=value pairs that describe a required claim in the ID token`, Required: false, Deprecated: false, Positional: false, @@ -873,11 +865,11 @@ func k8sClusterUpdate() *core.Command { }, Examples: []*core.Example{ { - Short: "Enable dashboard on a given cluster", + Short: "Enable dashboard on a cluster", Raw: `scw k8s cluster update 11111111-1111-1111-111111111111 enable-dashboard=true`, }, { - Short: "Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a given cluster", + Short: "Add TTLAfterFinished and ServiceNodeExclusion as feature gates on a cluster", Raw: `scw k8s cluster update 11111111-1111-1111-111111111111 feature-gates.0=TTLAfterFinished feature-gates.1=ServiceNodeExclusion`, }, }, @@ -887,7 +879,7 @@ func k8sClusterUpdate() *core.Command { func k8sClusterDelete() *core.Command { return &core.Command{ Short: `Delete a cluster`, - Long: `This method allows to delete a specific cluster and all its associated pools and nodes. Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster.`, + Long: `Deletes a specific cluster and all its associated pools and nodes. 
Note that this method will not delete any Load Balancers or Block Volumes that are associated with the cluster.`, Namespace: "k8s", Resource: "cluster", Verb: "delete", @@ -896,7 +888,7 @@ func k8sClusterDelete() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster to delete`, + Short: `ID of the cluster to delete`, Required: true, Deprecated: false, Positional: true, @@ -920,7 +912,7 @@ func k8sClusterDelete() *core.Command { }, Examples: []*core.Example{ { - Short: "Delete a given cluster", + Short: "Delete a cluster", Raw: `scw k8s cluster delete 11111111-1111-1111-111111111111`, }, }, @@ -930,7 +922,7 @@ func k8sClusterDelete() *core.Command { func k8sClusterUpgrade() *core.Command { return &core.Command{ Short: `Upgrade a cluster`, - Long: `This method allows to upgrade a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version.`, + Long: `Upgrades a specific Kubernetes cluster and/or its associated pools to a specific and supported Kubernetes version.`, Namespace: "k8s", Resource: "cluster", Verb: "upgrade", @@ -939,21 +931,21 @@ func k8sClusterUpgrade() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster to upgrade`, + Short: `ID of the cluster to upgrade`, Required: true, Deprecated: false, Positional: true, }, { Name: "version", - Short: `The new Kubernetes version of the cluster`, + Short: `New Kubernetes version of the cluster`, Required: true, Deprecated: false, Positional: false, }, { Name: "upgrade-pools", - Short: `The enablement of the pools upgrade`, + Short: `Enablement of the pools upgrade`, Required: false, Deprecated: false, Positional: false, @@ -970,11 +962,11 @@ func k8sClusterUpgrade() *core.Command { }, Examples: []*core.Example{ { - Short: "Upgrade a given cluster to Kubernetes version 1.24.7 (without upgrading the pools)", + Short: "Upgrade a cluster to Kubernetes version 1.24.7 (without upgrading the 
pools)", Raw: `scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7`, }, { - Short: "Upgrade a given cluster to Kubernetes version 1.24.7 (and upgrade the pools)", + Short: "Upgrade a cluster to Kubernetes version 1.24.7 (and upgrade the pools)", Raw: `scw k8s cluster upgrade 11111111-1111-1111-111111111111 version=1.24.7 upgrade-pools=true`, }, }, @@ -984,7 +976,7 @@ func k8sClusterUpgrade() *core.Command { func k8sClusterListAvailableVersions() *core.Command { return &core.Command{ Short: `List available versions for a cluster`, - Long: `This method allows to list the versions that a specific Kubernetes cluster is allowed to upgrade to. Note that it will be every patch version greater than the actual one as well a one minor version ahead of the actual one. Upgrades skipping a minor version will not work.`, + Long: `List the versions that a specific Kubernetes cluster is allowed to upgrade to. Results will comprise every patch version greater than the current patch, as well as one minor version ahead of the current version. 
Any upgrade skipping a minor version will not work.`, Namespace: "k8s", Resource: "cluster", Verb: "list-available-versions", @@ -993,7 +985,7 @@ func k8sClusterListAvailableVersions() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster which the available Kuberentes versions will be listed from`, + Short: `ID of the cluster which the available Kubernetes versions will be listed from`, Required: true, Deprecated: false, Positional: true, @@ -1010,7 +1002,7 @@ func k8sClusterListAvailableVersions() *core.Command { }, Examples: []*core.Example{ { - Short: "List all available versions for a given cluster to upgrade to", + Short: "List all available versions for a cluster to upgrade to", Raw: `scw k8s cluster list-available-versions 11111111-1111-1111-111111111111`, }, }, @@ -1034,7 +1026,7 @@ func k8sClusterListAvailableVersions() *core.Command { func k8sClusterResetAdminToken() *core.Command { return &core.Command{ Short: `Reset the admin token of a cluster`, - Long: `This method allows to reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable after) and create a new one. Note that the redownload of the kubeconfig will be necessary to keep interacting with the cluster (if the old admin token was used).`, + Long: `Reset the admin token for a specific Kubernetes cluster. This will invalidate the old admin token (which will not be usable afterwards) and create a new one. 
Note that you will need to redownload kubeconfig in order to keep interacting with the cluster.`, Namespace: "k8s", Resource: "cluster", Verb: "reset-admin-token", @@ -1043,7 +1035,7 @@ func k8sClusterResetAdminToken() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster of which the admin token will be renewed`, + Short: `ID of the cluster on which the admin token will be renewed`, Required: true, Deprecated: false, Positional: true, @@ -1066,7 +1058,7 @@ func k8sClusterResetAdminToken() *core.Command { }, Examples: []*core.Example{ { - Short: "Reset the admin token for a given cluster", + Short: "Reset the admin token for a cluster", Raw: `scw k8s cluster reset-admin-token 11111111-1111-1111-111111111111`, }, }, @@ -1076,7 +1068,7 @@ func k8sClusterResetAdminToken() *core.Command { func k8sPoolList() *core.Command { return &core.Command{ Short: `List all the pools in a cluster`, - Long: `This method allows to list all the existing pools for a specific Kubernetes cluster.`, + Long: `List all the existing pools for a specific Kubernetes cluster.`, Namespace: "k8s", Resource: "pool", Verb: "list", @@ -1085,14 +1077,14 @@ func k8sPoolList() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster from which the pools will be listed from`, + Short: `ID of the cluster from which the pools will be listed from`, Required: true, Deprecated: false, Positional: false, }, { Name: "order-by", - Short: `The sort order of the returned pools`, + Short: `Sort order of the returned pools`, Required: false, Deprecated: false, Positional: false, @@ -1100,14 +1092,14 @@ func k8sPoolList() *core.Command { }, { Name: "name", - Short: `The name on which to filter the returned pools`, + Short: `Name on which to filter the returned pools`, Required: false, Deprecated: false, Positional: false, }, { Name: "status", - Short: `The status on which to filter the returned pools`, + Short: `Status on which to filter 
the returned pools`, Required: false, Deprecated: false, Positional: false, @@ -1134,19 +1126,19 @@ func k8sPoolList() *core.Command { }, Examples: []*core.Example{ { - Short: "List all pools for a given cluster", + Short: "List all pools for a cluster", Raw: `scw k8s pool list cluster-id=11111111-1111-1111-111111111111`, }, { - Short: "List all scaling pools for a given cluster", + Short: "List all scaling pools for a cluster", Raw: `scw k8s pool list cluster-id=11111111-1111-1111-111111111111 status=scaling`, }, { - Short: "List all pools for a given cluster that contain the word foo in the pool name", + Short: "List all pools for a cluster that contains the word 'foo' in the pool name", Raw: `scw k8s pool list cluster-id=11111111-1111-1111-111111111111 name=foo`, }, { - Short: "List all pools for a given cluster and order them by ascending creation date", + Short: "List all pools for a cluster and order them by ascending creation date", Raw: `scw k8s pool list cluster-id=11111111-1111-1111-111111111111 order-by=created_at_asc`, }, }, @@ -1215,7 +1207,7 @@ func k8sPoolList() *core.Command { func k8sPoolCreate() *core.Command { return &core.Command{ Short: `Create a new pool in a cluster`, - Long: `This method allows to create a new pool in a specific Kubernetes cluster.`, + Long: `Create a new pool in a specific Kubernetes cluster.`, Namespace: "k8s", Resource: "pool", Verb: "create", @@ -1224,14 +1216,14 @@ func k8sPoolCreate() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The ID of the cluster in which the pool will be created`, + Short: `ID of the cluster in which the pool will be created`, Required: true, Deprecated: false, Positional: false, }, { Name: "name", - Short: `The name of the pool`, + Short: `Name of the pool`, Required: true, Deprecated: false, Positional: false, @@ -1239,49 +1231,49 @@ func k8sPoolCreate() *core.Command { }, { Name: "node-type", - Short: `The node type is the type of Scaleway Instance wanted for the 
pool`, + Short: `Node type is the type of Scaleway Instance wanted for the pool`, Required: true, Deprecated: false, Positional: false, }, { Name: "placement-group-id", - Short: `The placement group ID in which all the nodes of the pool will be created`, + Short: `Placement group ID in which all the nodes of the pool will be created`, Required: false, Deprecated: false, Positional: false, }, { Name: "autoscaling", - Short: `The enablement of the autoscaling feature for the pool`, + Short: `Defines whether the autoscaling feature is enabled for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "size", - Short: `The size (number of nodes) of the pool`, + Short: `Size (number of nodes) of the pool`, Required: true, Deprecated: false, Positional: false, }, { Name: "min-size", - Short: `The minimum size of the pool`, + Short: `Minimum size of the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "max-size", - Short: `The maximum size of the pool`, + Short: `Maximum size of the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "container-runtime", - Short: `The container runtime for the nodes of the pool`, + Short: `Container runtime for the nodes of the pool`, Required: false, Deprecated: false, Positional: false, @@ -1289,21 +1281,21 @@ func k8sPoolCreate() *core.Command { }, { Name: "autohealing", - Short: `The enablement of the autohealing feature for the pool`, + Short: `Defines whether the autohealing feature is enabled for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "tags.{index}", - Short: `The tags associated with the pool`, + Short: `Tags associated with the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "kubelet-args.{key}", - Short: `The Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental`, + Short: `Kubelet arguments to be used by this pool. 
Note that this feature is to be considered as experimental`, Required: false, Deprecated: false, Positional: false, @@ -1322,14 +1314,14 @@ func k8sPoolCreate() *core.Command { }, { Name: "zone", - Short: `The Zone in which the Pool's node will be spawn in`, + Short: `Zone in which the pool's nodes will be spawned`, Required: false, Deprecated: false, Positional: false, }, { Name: "root-volume-type", - Short: `The system volume disk type`, + Short: `System volume disk type`, Required: false, Deprecated: false, Positional: false, @@ -1337,7 +1329,7 @@ func k8sPoolCreate() *core.Command { }, { Name: "root-volume-size", - Short: `The system volume disk size`, + Short: `System volume disk size`, Required: false, Deprecated: false, Positional: false, @@ -1354,15 +1346,15 @@ func k8sPoolCreate() *core.Command { }, Examples: []*core.Example{ { - Short: "Create a pool named bar with 2 DEV1-XL on a given cluster", + Short: "Create a pool named bar with 2 DEV1-XL on a cluster", Raw: `scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=bar node-type=DEV1-XL size=2`, }, { - Short: "Create a pool named fish with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a given cluster", + Short: "Create a pool named 'fish' with 5 GP1-L with autoscaling enabled within 0 and 10 nodes, autohealing enabled, and containerd as the container runtime on a cluster", Raw: `scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=fish node-type=GP1-L size=5 min-size=0 max-size=10 autoscaling=true autohealing=true container-runtime=containerd`, }, { - Short: "Create a tagged pool named turtle with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the pool on a given cluster", + Short: "Create a tagged pool named 'turtle' with 1 GP1-S which is using the already created placement group 2222222222222-2222-222222222222 for all the nodes in the 
pool on a cluster", Raw: `scw k8s pool create cluster-id=11111111-1111-1111-111111111111 name=turtle node-type=GP1-S size=1 placement-group-id=2222222222222-2222-222222222222 tags.0=turtle tags.1=placement-group`, }, }, @@ -1372,7 +1364,7 @@ func k8sPoolCreate() *core.Command { func k8sPoolGet() *core.Command { return &core.Command{ Short: `Get a pool in a cluster`, - Long: `This method allows to get details about a specific pool.`, + Long: `Get details about a specific pool in a Kubernetes cluster.`, Namespace: "k8s", Resource: "pool", Verb: "get", @@ -1381,7 +1373,7 @@ func k8sPoolGet() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "pool-id", - Short: `The ID of the requested pool`, + Short: `ID of the requested pool`, Required: true, Deprecated: false, Positional: true, @@ -1408,7 +1400,7 @@ func k8sPoolGet() *core.Command { func k8sPoolUpgrade() *core.Command { return &core.Command{ Short: `Upgrade a pool in a cluster`, - Long: `This method allows to upgrade the Kubernetes version of a specific pool. Note that this will work when the targeted version is the same than the version of the cluster.`, + Long: `Upgrade the Kubernetes version of a specific pool. 
Note that this will work when the targeted version is the same as the version of the cluster.`, Namespace: "k8s", Resource: "pool", Verb: "upgrade", @@ -1417,14 +1409,14 @@ func k8sPoolUpgrade() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "pool-id", - Short: `The ID of the pool to upgrade`, + Short: `ID of the pool to upgrade`, Required: true, Deprecated: false, Positional: true, }, { Name: "version", - Short: `The new Kubernetes version for the pool`, + Short: `New Kubernetes version for the pool`, Required: true, Deprecated: false, Positional: false, @@ -1441,7 +1433,7 @@ func k8sPoolUpgrade() *core.Command { }, Examples: []*core.Example{ { - Short: "Upgrade a given pool to the Kubernetes version 1.24.7", + Short: "Upgrade a specific pool to the Kubernetes version 1.24.7", Raw: `scw k8s pool upgrade 11111111-1111-1111-111111111111 version=1.24.7`, }, }, @@ -1451,7 +1443,7 @@ func k8sPoolUpgrade() *core.Command { func k8sPoolUpdate() *core.Command { return &core.Command{ Short: `Update a pool in a cluster`, - Long: `This method allows to update some attributes of a specific pool such as the size, the autoscaling enablement, the tags, ...`, + Long: `Update attributes of a specific pool, such as size, autoscaling settings, and tags.`, Namespace: "k8s", Resource: "pool", Verb: "update", @@ -1460,56 +1452,56 @@ func k8sPoolUpdate() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "pool-id", - Short: `The ID of the pool to update`, + Short: `ID of the pool to update`, Required: true, Deprecated: false, Positional: true, }, { Name: "autoscaling", - Short: `The new value for the enablement of autoscaling for the pool`, + Short: `New value for the enablement of autoscaling for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "size", - Short: `The new size for the pool`, + Short: `New size for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "min-size", - Short: `The new minimun size for the pool`, + Short: 
`New minimum size for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "max-size", - Short: `The new maximum size for the pool`, + Short: `New maximum size for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "autohealing", - Short: `The new value for the enablement of autohealing for the pool`, + Short: `New value for the enablement of autohealing for the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "tags.{index}", - Short: `The new tags associated with the pool`, + Short: `New tags associated with the pool`, Required: false, Deprecated: false, Positional: false, }, { Name: "kubelet-args.{key}", - Short: `The new Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental`, + Short: `New Kubelet arguments to be used by this pool. Note that this feature is to be considered as experimental`, Required: false, Deprecated: false, Positional: false, @@ -1556,7 +1548,7 @@ func k8sPoolUpdate() *core.Command { func k8sPoolDelete() *core.Command { return &core.Command{ Short: `Delete a pool in a cluster`, - Long: `This method allows to delete a specific pool from a cluster, deleting all the nodes associated with it.`, + Long: `Delete a specific pool from a cluster. 
All of the pool's nodes will also be deleted.`, Namespace: "k8s", Resource: "pool", Verb: "delete", @@ -1565,7 +1557,7 @@ func k8sPoolDelete() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "pool-id", - Short: `The ID of the pool to delete`, + Short: `ID of the pool to delete`, Required: true, Deprecated: false, Positional: true, @@ -1582,7 +1574,7 @@ func k8sPoolDelete() *core.Command { }, Examples: []*core.Example{ { - Short: "Delete a given pool", + Short: "Delete a specific pool", Raw: `scw k8s pool delete 11111111-1111-1111-111111111111`, }, }, @@ -1592,7 +1584,7 @@ func k8sPoolDelete() *core.Command { func k8sNodeList() *core.Command { return &core.Command{ Short: `List all the nodes in a cluster`, - Long: `This method allows to list all the existing nodes for a specific Kubernetes cluster.`, + Long: `List all the existing nodes for a specific Kubernetes cluster.`, Namespace: "k8s", Resource: "node", Verb: "list", @@ -1601,21 +1593,21 @@ func k8sNodeList() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "cluster-id", - Short: `The cluster ID from which the nodes will be listed from`, + Short: `Cluster ID from which the nodes will be listed from`, Required: true, Deprecated: false, Positional: false, }, { Name: "pool-id", - Short: `The pool ID on which to filter the returned nodes`, + Short: `Pool ID on which to filter the returned nodes`, Required: false, Deprecated: false, Positional: false, }, { Name: "order-by", - Short: `The sort order of the returned nodes`, + Short: `Sort order of the returned nodes`, Required: false, Deprecated: false, Positional: false, @@ -1623,14 +1615,14 @@ func k8sNodeList() *core.Command { }, { Name: "name", - Short: `The name on which to filter the returned nodes`, + Short: `Name on which to filter the returned nodes`, Required: false, Deprecated: false, Positional: false, }, { Name: "status", - Short: `The status on which to filter the returned nodes`, + Short: `Status on which to filter the returned nodes`, Required: false, 
Deprecated: false, Positional: false, @@ -1657,15 +1649,15 @@ func k8sNodeList() *core.Command { }, Examples: []*core.Example{ { - Short: "List all the nodes in the given cluster", + Short: "List all the nodes in the cluster", Raw: `scw k8s node list cluster-id=11111111-1111-1111-111111111111`, }, { - Short: "List all the nodes in the pool 2222222222222-2222-222222222222 in the given cluster", + Short: "List all the nodes in the pool 2222222222222-2222-222222222222 in the cluster", Raw: `scw k8s node list cluster-id=11111111-1111-1111-111111111111 pool-id=2222222222222-2222-222222222222`, }, { - Short: "List all ready nodes in the given cluster", + Short: "List all ready nodes in the cluster", Raw: `scw k8s node list cluster-id=11111111-1111-1111-111111111111 status=ready`, }, }, @@ -1707,7 +1699,7 @@ func k8sNodeList() *core.Command { func k8sNodeGet() *core.Command { return &core.Command{ Short: `Get a node in a cluster`, - Long: `This method allows to get details about a specific Kubernetes node.`, + Long: `Get details about a specific Kubernetes node.`, Namespace: "k8s", Resource: "node", Verb: "get", @@ -1716,7 +1708,7 @@ func k8sNodeGet() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "node-id", - Short: `The ID of the requested node`, + Short: `ID of the requested node`, Required: true, Deprecated: false, Positional: true, @@ -1733,7 +1725,7 @@ func k8sNodeGet() *core.Command { }, Examples: []*core.Example{ { - Short: "Get a given node", + Short: "Get a node", Raw: `scw k8s node get 11111111-1111-1111-111111111111`, }, }, @@ -1743,7 +1735,7 @@ func k8sNodeGet() *core.Command { func k8sNodeReplace() *core.Command { return &core.Command{ Short: `Replace a node in a cluster`, - Long: `This method allows to replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. 
Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications.`, + Long: `Replace a specific node. The node will be set cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. Then the node will be deleted, and a new one will be created after the deletion. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications.`, Namespace: "k8s", Resource: "node", Verb: "replace", @@ -1752,7 +1744,7 @@ func k8sNodeReplace() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "node-id", - Short: `The ID of the node to replace`, + Short: `ID of the node to replace`, Required: true, Deprecated: false, Positional: true, @@ -1769,7 +1761,7 @@ func k8sNodeReplace() *core.Command { }, Examples: []*core.Example{ { - Short: "Replace a given node", + Short: "Replace a node", Raw: `scw k8s node replace 11111111-1111-1111-111111111111`, }, }, @@ -1779,7 +1771,7 @@ func k8sNodeReplace() *core.Command { func k8sNodeReboot() *core.Command { return &core.Command{ Short: `Reboot a node in a cluster`, - Long: `This method allows to reboot a specific node. This node will frist be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and reschedule onto another schedulable node. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications.`, + Long: `Reboot a specific node. This node will first be cordoned, meaning that scheduling will be disabled. Then the existing pods on the node will be drained and rescheduled onto another schedulable node. 
Note that when there is not enough space to reschedule all the pods (in a one-node cluster, for instance), you may experience some disruption of your applications.`, Namespace: "k8s", Resource: "node", Verb: "reboot", @@ -1788,7 +1780,7 @@ func k8sNodeReboot() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "node-id", - Short: `The ID of the node to reboot`, + Short: `ID of the node to reboot`, Required: true, Deprecated: false, Positional: true, @@ -1805,7 +1797,7 @@ func k8sNodeReboot() *core.Command { }, Examples: []*core.Example{ { - Short: "Reboot a given node", + Short: "Reboot a node", Raw: `scw k8s node reboot 11111111-1111-1111-111111111111`, }, }, @@ -1815,7 +1807,7 @@ func k8sNodeReboot() *core.Command { func k8sNodeDelete() *core.Command { return &core.Command{ Short: `Delete a node in a cluster`, - Long: `This method allows to delete a specific node. Note that when there is not enough space to reschedule all the pods (in a one node cluster for instance), you may experience some disruption of your applications.`, + Long: `Delete a specific node. 
Note that when there is not enough space to reschedule all the pods (in a one-node cluster for instance), you may experience some disruption of your applications.`, Namespace: "k8s", Resource: "node", Verb: "delete", @@ -1824,7 +1816,7 @@ func k8sNodeDelete() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "node-id", - Short: `The ID of the node to replace`, + Short: `ID of the node to replace`, Required: true, Deprecated: false, Positional: true, @@ -1855,15 +1847,15 @@ func k8sNodeDelete() *core.Command { }, Examples: []*core.Example{ { - Short: "Delete a given node", + Short: "Delete a node", Raw: `scw k8s node delete 11111111-1111-1111-111111111111`, }, { - Short: "Delete a given node without evicting workloads", + Short: "Delete a node without evicting workloads", Raw: `scw k8s node delete 11111111-1111-1111-111111111111 skip-drain=true`, }, { - Short: "Replace a given node by a new one", + Short: "Replace a node by a new one", Raw: `scw k8s node delete 11111111-1111-1111-111111111111 replace=true`, }, }, @@ -1873,7 +1865,7 @@ func k8sNodeDelete() *core.Command { func k8sVersionList() *core.Command { return &core.Command{ Short: `List all available versions`, - Long: `This method allows to list all available versions for the creation of a new Kubernetes cluster.`, + Long: `List all available versions for the creation of a new Kubernetes cluster.`, Namespace: "k8s", Resource: "version", Verb: "list", @@ -1925,7 +1917,7 @@ func k8sVersionList() *core.Command { func k8sVersionGet() *core.Command { return &core.Command{ Short: `Get details about a specific version`, - Long: `This method allows to get a specific Kubernetes version and the details about the version.`, + Long: `Get a specific Kubernetes version and the details about the version.`, Namespace: "k8s", Resource: "version", Verb: "get", @@ -1934,7 +1926,7 @@ func k8sVersionGet() *core.Command { ArgSpecs: core.ArgSpecs{ { Name: "version-name", - Short: `The requested version name`, + Short: `Requested 
version name`, Required: true, Deprecated: false, Positional: true,