diff --git a/_topic_maps/_topic_map.yml b/_topic_maps/_topic_map.yml index 220b5cff51a8..b35d157db5a9 100644 --- a/_topic_maps/_topic_map.yml +++ b/_topic_maps/_topic_map.yml @@ -1819,6 +1819,18 @@ Topics: Distros: openshift-origin - Name: Cluster Operators reference File: operator-reference +- Name: OLM v1 (Technology Preview) + Dir: olm_v1 + Distros: openshift-origin,openshift-enterprise + Topics: + - Name: About OLM v1 + File: index + - Name: Packaging format + File: olmv1-packaging-format + - Name: Managing catalogs + File: olmv1-managing-catalogs + - Name: Installing an Operator from a catalog + File: olmv1-installing-an-operator-from-a-catalog --- Name: CI/CD Dir: cicd diff --git a/modules/olm-rukpak-about.adoc b/modules/olm-rukpak-about.adoc index 9107e298ea03..5fb480cd9fa7 100644 --- a/modules/olm-rukpak-about.adoc +++ b/modules/olm-rukpak-about.adoc @@ -4,12 +4,17 @@ :_content-type: CONCEPT [id="olm-rukpak-about_{context}"] +ifeval::["{context}" == "olm-packaging-format"] = RukPak (Technology Preview) :FeatureName: RukPak include::snippets/technology-preview.adoc[] {product-title} 4.12 introduces the _platform Operator_ type as a Technology Preview feature. The platform Operator mechanism relies on the RukPak component, also introduced in {product-title} 4.12, and its resources to manage content. +endif::[] +ifeval::["{context}" == "olmv1-packaging-format"] += RukPak +endif::[] RukPak consists of a series of controllers, known as _provisioners_, that install and manage content on a Kubernetes cluster. RukPak also provides two primary APIs: `Bundle` and `BundleDeployment`. These components work together to bring content onto the cluster and install it, generating resources within the cluster. 
diff --git a/modules/olmv1-about-catalogs.adoc b/modules/olmv1-about-catalogs.adoc new file mode 100644 index 000000000000..1932116e0330 --- /dev/null +++ b/modules/olmv1-about-catalogs.adoc @@ -0,0 +1,12 @@ +// Module included in the following assemblies: +// +// * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc + +:_content-type: CONCEPT + +[id="olmv1-about-catalogs_{context}"] += About catalogs in OLM 1.0 + +Operator Lifecycle Manager (OLM) 1.0 introduces the catalogd component to the OLM suite of microservices. Catalogd is a Kubernetes extension that unpacks file-based catalog content for on-cluster clients. Currently, catalogd unpacks catalog content that is packaged and distributed as container images. + +Catalogd helps customers discover installable content by hosting catalog metadata for Kubernetes extensions, such as Operators and controllers. diff --git a/modules/olmv1-about-target-versions.adoc b/modules/olmv1-about-target-versions.adoc new file mode 100644 index 000000000000..cad4e575d4e8 --- /dev/null +++ b/modules/olmv1-about-target-versions.adoc @@ -0,0 +1,76 @@ +// Module included in the following assemblies: +// +// * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc + +:_content-type: CONCEPT + +[id="olmv1-about-operator-updates_{context}"] += About target versions in OLM 1.0 + +In Operator Lifecycle Manager 1.0, cluster administrators the set target version of an Operator in the Operator's custom resource (CR). + +If the Operator's version is not specified in the CR, then OLM 1.0 defaults to the latest stable version as the target version. When updates to the Operator are published to the catalog, the Operator automatically updates to the latest stable version. 
+ +.Example CR without a specified target version +[source,yaml] +---- +apiVersion: operators.operatorframework.io/v1alpha1 +kind: Operator +metadata: + name: quay-example +spec: + packageName: quay-operator + version: <1> +---- +<1> Leave the target version unspecified to automatically set the version to the latest stable version. + +[IMPORTANT] +==== +What about a specified channel with an unspecified version? +==== + +If you specify the Operator's target version in the CR, OLM 1.0 installs the specified version. When the target version is specified in the Operator's CR, OLM does not update the target version. + +.Example CR with the target version specified +[source,yaml] +---- +apiVersion: operators.operatorframework.io/v1alpha1 +kind: Operator +metadata: + name: quay-example +spec: + packageName: quay-operator + channel: stable-3.8 + version: 3.8.12 +---- + +Edit the target version to change the installed version of an Operator. As long as the update is supported by the skip ranges defined in the catalog, you can update across channels and even downgrade an Operator. 
+ +.Example CR with an updated target version +[source,yaml] +---- +apiVersion: operators.operatorframework.io/v1alpha1 +kind: Operator +metadata: + name: quay-example +spec: + packageName: quay-operator + channel: stable-3.9 + version: 3.9.1 +---- + +You can inspect an Operator's catalog contents, including supported versions and skip ranges, by running the following command: + +.Command syntax +[source,terminal] +---- +$ oc get package - -o yaml +---- + +After a CR is created or updated, run the following command to create or configure the Operator: + +.Command syntax +[source,terminal] +---- +$ oc apply -f .yaml +---- diff --git a/modules/olmv1-adding-a-catalog.adoc b/modules/olmv1-adding-a-catalog.adoc new file mode 100644 index 000000000000..ac4d447100c0 --- /dev/null +++ b/modules/olmv1-adding-a-catalog.adoc @@ -0,0 +1,75 @@ +// Module included in the following assemblies: +// +// * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc + +:_content-type: PRODCEDURE + +[id="olmv1-adding-a-catalog-to-a-cluster_{context}"] += Adding a catalog to a cluster + +To add a catalog to a cluster, create a catalog custom resource (CR) and apply it to the cluster. + +.Procedure + +. Create a catalog custom resource (CR), similar to the following example: ++ +.Example `redhat-operators.yaml` +[source,yaml] +---- +apiVersion: catalogd.operatorframework.io/v1alpha1 +kind: Catalog +metadata: + name: redhat-operators +spec: + source: + type: image + image: + ref: registry.redhat.io/redhat/redhat-operator-index:v{product-version} <1> +---- +<1> Specify the catalog's image in the `spec.source.image` field. + +. Run the following command to add the catalog to your cluster. By default, your catalog is installed in the `openshift-catalogd` namespace. 
++ +[source,terminal] +---- +$ oc apply -f redhat-operators.yaml +---- ++ +.Example output +[source,text] +---- +catalog.catalogd.operatorframework.io/redhat-operators created +---- + +.Verification + +* Run the following commands to verify the status of your catalog: + +.. Run the following command to check if your catalog is available: ++ +[source,terminal] +---- +$ oc get catalog +---- ++ +.Example output +[source,text] +---- +NAME AGE +redhat-operators 20s +---- + +.. Run the following command to check the status of your catalog pods: ++ +[source,terminal] +---- +$ oc get pod -n openshift-catalogd +---- ++ +.Example output +[source,text] +---- +NAME READY STATUS RESTARTS AGE +catalogd-controller-manager-5b8844bbc4-ps76j 2/2 Running 0 60m +redhat-operators 0/1 Completed 0 47s +---- diff --git a/modules/olmv1-catalog-plain.adoc b/modules/olmv1-catalog-plain.adoc new file mode 100644 index 000000000000..d61ba2729541 --- /dev/null +++ b/modules/olmv1-catalog-plain.adoc @@ -0,0 +1,263 @@ +// Module included in the following assemblies: +// +// * operators/olm_v1/olmv1-managing-catalogs.adoc + +:_content-type: CONCEPT + +[id="olmv1-building-plain-bundle-image-source_{context}"] += Building a plain bundle image from an image source + +The Operator Controller currently only supports installing plain bundles created from a _plain bundle image_. + +.Procedure + +. Verify that your Kubernetes manifests are in a flat directory at the root of your project similar to the following example: ++ +[source,terminal] +---- +$ tree manifests +---- ++ +.Example output +[source,terminal] +---- +manifests +├── namespace.yaml +├── service_account.yaml +├── cluster_role.yaml +├── cluster_role_binding.yaml +└── deployment.yaml +---- + +.. 
If you are using link:https://kustomize.io[Kustomize] to build your manifests, you must redirect the output to one or more files under the `manifests/` directory: ++ +[source,terminal] +---- +$ kustomize build templates > manifests/manifests.yaml +---- +// For more information, see [Building a plain bundle > Prerequisites](https://github.com/ator-framework/rukpak/blob/main/docs/bundles/plain.md#prerequisites). + +. Create a Dockerfile at the root of your project: ++ +[source,terminal] +---- +$ touch plainbundle.Dockerfile +---- + +. Make the following changes to your Dockerfile: ++ +.Example `Dockerfile` +---- + FROM scratch + ADD manifests /manifests +---- ++ +[NOTE] +==== +Use the `FROM scratch` directive to make the size of the image smaller. No other files or directories are required in the bundle image. +==== + +. Build an OCI-compliant image using your preferred build tool, similar to the following example. You must use an image tag that references a repository where you have push access privileges. ++ +[source,terminal] +---- +$ docker build -f plainbundle.Dockerfile -t \ + quay.io//: . +---- + +. Push the image to your remote registry: ++ +[source,terminal] +---- +$ docker push quay.io//: +---- + +.Additional resources + +* link:https://github.com/operator-framework/olm-docs/blob/master/content/en/docs/Reference/file-based-catalogs.md[File-based catalog bundle schema] +* link:https://github.com/opencontainers/image-spec#oci-image-format-specification[OCI image specification] +* link:https://github.com/operator-framework/rukpak/blob/main/docs/bundles/plain.md#image-source[RukPak > Building a plain bundle > Image source] +* link:https://github.com/operator-framework/rukpak/blob/main/docs/sources/image.md#private-image-registries[RukPak > Sources > Images > Private image registries] + +[id="olmv1-creating-fbc_{context}"] += Creating a file-based catalog + +If you do not have a file-based catalog, you must perform the following steps to initialize the catalog. 
+ +.Procedure + +. Create a directory for the catalog by running the following command: ++ +[source,terminal] +---- +$ mkdir +---- + +. In the same directory level, create a Dockerfile that can build a catalog image: +[source,terminal] ++ +---- +$ touch Dockerfile +---- ++ +The Dockerfile must be in the same parent directory as the catalog directory that you created in the previous step: ++ +.Example directory structure +[source,terminal] +---- +. +├── +└── .Dockerfile +---- + +. Make the following changes to your Dockerfile: ++ +.Example Dockerfile +[source,yaml] +---- +FROM scratch +ADD /configs +---- ++ +[NOTE] +==== +Use the `FROM scratch` directive to make the size of the image smaller. No other files or directories are required in the bundle image. +==== + +. Populate the catalog with the package definition for your Operator by running the `opm init` command: ++ +[source,terminal] +---- +$ opm init \ + --output json \ + > /index.json +---- ++ +This command generates an `olm.package` declarative config blob in the specified catalog configuration file. + +[id="olmv1-adding-plain-bundle-to-fbc_{context}"] += Adding a plain bundle to a file-based catalog + +Currently, the `opm render` command does not support adding plain bundles to catalogs. You must manually add plain bundles to your file-based catalog, as shown in the following procedure. + +.Procedure + +. Verify that your catalog's `index.json` or `index.yaml` file is similar to the following example: ++ +.Example `/index.json` file +[source,json] +---- +{ + { + "schema": "olm.package", + "name": "", + } +} +---- + +. 
To create an `olm.bundle` blob, edit your `index.json` or `index.yaml` file, similar to the following example: ++ +.Example `/index.json` file with `olm.bundle` blob +[source,json] +---- +{ + "schema": "olm.bundle", + "name": ".v", + "package": "", + "image": "quay.io//:", + "properties": [ + { + "type": "olm.package", + "value": { + "packageName": "", + "version": "" + } + }, + { + "type": "olm.bundle.mediatype", + "value": "plain+v0" + } + ] +} +---- + +. To create an `olm.channel` blob, edit your `index.json` or `index.yaml` file, similar to the following example: ++ +.Example `/index.json` file with `olm.channel` blob +[source,json] +---- +{ + "schema": "olm.channel", + "name": "", + "package": "", + "entries": [ + { + "name": ".v" + } + ] +} +---- + +// Please refer to [channel naming conventions](https://olm.operatorframework.io/docs/best-practices/channel-naming/) for choosing the . An example of the is `candidate-v0`. + +.Verification + +* Open your `index.json` or `index.yaml` file and ensure it is similar to the following example: ++ +.Example `/index.json` file +[source,json] +---- +{ + "schema": "olm.package", + "name": "example-operator", +} +{ + "schema": "olm.bundle", + "name": "example-operator.v0.0.1", + "package": "example-operator", + "image": "quay.io/rashmigottipati/example-operator-bundle:v0.0.1", + "properties": [ + { + "type": "olm.package", + "value": { + "packageName": "example-operator", + "version": "v0.0.1" + } + }, + { + "type": "olm.bundle.mediatype", + "value": "plain+v0" + } + ] +} +{ + "schema": "olm.channel", + "name": "preview", + "package": "example-operator", + "entries": [ + { + "name": "example-operator.v0.0.1" + } + ] +} +---- + +[id="olmv1-publishing-fbc_{context}"] += Building and publishing a file-based catalog + +.Procedure + +. Build your file-bsaed catalog as an image by running the following command: ++ +[source,terminal] +---- +$ docker build -f .Dockerfile -t \ + quay.io//: . +---- + +. 
Push your catalog image by running the following command: ++ +[source,terminal] +---- +$ docker push quay.io//: +---- \ No newline at end of file diff --git a/modules/olmv1-deleting-an-operator.adoc b/modules/olmv1-deleting-an-operator.adoc new file mode 100644 index 000000000000..378a30aa4c78 --- /dev/null +++ b/modules/olmv1-deleting-an-operator.adoc @@ -0,0 +1,60 @@ +// Module included in the following assemblies: +// +// * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc + +:_content-type: PROCEDURE + +[id="olmv1-deleting-an-operator_{context}"] += Deleting an Operator + +You can delete an Operator by deleting the Operator's custom resource (CR). + +.Prerequisites + +* You have a catalog installed. +* You have an Operator installed. + +.Procedure + +* Run the following command to delete an Operator: ++ +[source,terminal] +---- +$ oc delete operator.operators.operatorframework.io quay-example +---- ++ +.Example output +[source,text] +---- +operator.operators.operatorframework.io "quay-example" deleted +---- + +.Verification + +* Run the following commands to verify that your Operator was deleted: ++ +[source,terminal] +---- +$ oc get operator.operators.operatorframework.io +---- ++ +[source,terminal] +---- +$ oc get bundledeployment +---- ++ +[source,terminal] +---- +$ oc get bundle +---- ++ +[source,terminal] +---- +$ oc get pod -n quay-operator-system +---- ++ +.Example output +[source,text] +---- +No resources found +---- diff --git a/modules/olmv1-finding-operators-to-install.adoc b/modules/olmv1-finding-operators-to-install.adoc new file mode 100644 index 000000000000..d284cf26c0ac --- /dev/null +++ b/modules/olmv1-finding-operators-to-install.adoc @@ -0,0 +1,245 @@ +// Module included in the following assemblies: +// +// * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc + +:_content-type: PROCEDURE + +[id="olmv1-finding-operators-to-install_{context}"] += Finding Operators to install from a catalog to install + +After you 
add a catalog to your cluster, you can query the catalog to find Operators and extensions to install. + +.Prerequisite + +* You have added a catalog to your cluster. + +.Procedure + +. Run the following command to return a list of the Operators and extensikons in the catalog: ++ +[source,terminal] +---- +$ oc get packages +---- ++ +.Example output +[source,text] +---- +NAME AGE +redhat-operators-3scale-operator 5m27s +redhat-operators-advanced-cluster-management 5m27s +redhat-operators-amq-broker-rhel8 5m27s +redhat-operators-amq-online 5m27s +redhat-operators-amq-streams 5m27s +redhat-operators-amq7-interconnect-operator 5m27s +redhat-operators-ansible-automation-platform-operator 5m27s +redhat-operators-ansible-cloud-addons-operator 5m27s +redhat-operators-apicast-operator 5m27s +redhat-operators-aws-efs-csi-driver-operator 5m27s +redhat-operators-aws-load-balancer-operator 5m27s +... +---- + +. Run the following command to list versions of the Operators and extensions that are available in the catalog: ++ +[source,terminal] +---- +$ oc get bundlemetadata +---- ++ +.Example output +[source,text] +---- +NAME AGE +redhat-operators-3scale-operator.v0.10.0-mas 6m16s +redhat-operators-3scale-operator.v0.10.5 6m16s +redhat-operators-3scale-operator.v0.11.0-mas 6m16s +redhat-operators-3scale-operator.v0.11.1-mas 6m16s +redhat-operators-3scale-operator.v0.11.2-mas 6m16s +redhat-operators-3scale-operator.v0.11.3-mas 6m15s +redhat-operators-3scale-operator.v0.11.5-mas 6m15s +redhat-operators-3scale-operator.v0.11.6-mas 6m15s +redhat-operators-3scale-operator.v0.11.7-mas 6m15s +redhat-operators-3scale-operator.v0.8.0 6m15s +redhat-operators-3scale-operator.v0.8.0-0.1634606167.p 6m15s +redhat-operators-3scale-operator.v0.8.1 6m15s +redhat-operators-3scale-operator.v0.8.2 6m15s +redhat-operators-3scale-operator.v0.8.3 6m15s +redhat-operators-3scale-operator.v0.8.3-0.1645735250.p 6m15s +redhat-operators-3scale-operator.v0.8.3-0.1646619125.p 6m15s 
+redhat-operators-3scale-operator.v0.8.3-0.1646742992.p 6m15s +redhat-operators-3scale-operator.v0.8.3-0.1649688682.p 6m15s +redhat-operators-3scale-operator.v0.8.4 6m15s +redhat-operators-3scale-operator.v0.8.4-0.1655690146.p 6m15s +redhat-operators-3scale-operator.v0.9.0 6m15s +redhat-operators-3scale-operator.v0.9.1 6m15s +redhat-operators-3scale-operator.v0.9.1-0.1664967752.p 6m15s +redhat-operators-aap-operator.v2.4.0-0.1695086513 6m15s +redhat-operators-aap-operator.v2.4.0-0.1695086882 6m15s +redhat-operators-aca-operator.v2.4.0-0.1695086173 6m15s +redhat-operators-advanced-cluster-management.v2.8.0 6m15s +redhat-operators-advanced-cluster-management.v2.8.1 6m15s +redhat-operators-advanced-cluster-management.v2.8.2 6m15s +... +---- + +. Inspect the contents of an Operator or extension's custom resource (CR) by running the following command: ++ +[source,terminal] +---- +$ oc get package - -o yaml +---- ++ +.Example command +[source,text] +---- +$ oc get package redhat-operators-quay-operator -o yaml +---- ++ +.Example output +[source,text] +---- +apiVersion: catalogd.operatorframework.io/v1alpha1 +kind: Package +metadata: + creationTimestamp: "2023-10-06T01:14:04Z" + generation: 1 + labels: + catalog: redhat-operators + name: redhat-operators-quay-operator + ownerReferences: + - apiVersion: catalogd.operatorframework.io/v1alpha1 + blockOwnerDeletion: true + controller: true + kind: Catalog + name: redhat-operators + uid: 403004b6-54a3-4471-8c90-63419f6a2c3e + resourceVersion: "45196" + uid: 252cfe74-936d-44fc-be5d-09a7be7e36f5 +spec: + catalog: + name: redhat-operators + channels: + - entries: + - name: quay-operator.v3.4.7 + skips: + - red-hat-quay.v3.3.4 + - quay-operator.v3.4.6 + - quay-operator.v3.4.5 + - quay-operator.v3.4.4 + - quay-operator.v3.4.3 + - quay-operator.v3.4.2 + - quay-operator.v3.4.1 + - quay-operator.v3.4.0 + name: quay-v3.4 + - entries: + - name: quay-operator.v3.5.7 + replaces: quay-operator.v3.5.6 + skipRange: '>=3.4.x <3.5.7' + name: 
quay-v3.5 + - entries: + - name: quay-operator.v3.6.0 + skipRange: '>=3.3.x <3.6.0' + - name: quay-operator.v3.6.1 + replaces: quay-operator.v3.6.0 + skipRange: '>=3.3.x <3.6.1' + - name: quay-operator.v3.6.10 + replaces: quay-operator.v3.6.9 + skipRange: '>=3.3.x <3.6.10' + - name: quay-operator.v3.6.2 + replaces: quay-operator.v3.6.1 + skipRange: '>=3.3.x <3.6.2' + - name: quay-operator.v3.6.4 + replaces: quay-operator.v3.6.2 + skipRange: '>=3.3.x <3.6.4' + - name: quay-operator.v3.6.5 + replaces: quay-operator.v3.6.4 + skipRange: '>=3.3.x <3.6.5' + - name: quay-operator.v3.6.6 + replaces: quay-operator.v3.6.5 + skipRange: '>=3.3.x <3.6.6' + - name: quay-operator.v3.6.7 + replaces: quay-operator.v3.6.6 + skipRange: '>=3.3.x <3.6.7' + - name: quay-operator.v3.6.8 + replaces: quay-operator.v3.6.7 + skipRange: '>=3.3.x <3.6.8' + - name: quay-operator.v3.6.9 + replaces: quay-operator.v3.6.8 + skipRange: '>=3.3.x <3.6.9' + name: stable-3.6 + - entries: + - name: quay-operator.v3.7.10 + replaces: quay-operator.v3.7.9 + skipRange: '>=3.4.x <3.7.10' + - name: quay-operator.v3.7.11 + replaces: quay-operator.v3.7.10 + skipRange: '>=3.4.x <3.7.11' + - name: quay-operator.v3.7.12 + replaces: quay-operator.v3.7.11 + skipRange: '>=3.4.x <3.7.12' + - name: quay-operator.v3.7.13 + replaces: quay-operator.v3.7.12 + skipRange: '>=3.4.x <3.7.13' + - name: quay-operator.v3.7.14 + replaces: quay-operator.v3.7.13 + skipRange: '>=3.4.x <3.7.14' + name: stable-3.7 + - entries: + - name: quay-operator.v3.8.0 + skipRange: '>=3.5.x <3.8.0' + - name: quay-operator.v3.8.1 + replaces: quay-operator.v3.8.0 + skipRange: '>=3.5.x <3.8.1' + - name: quay-operator.v3.8.10 + replaces: quay-operator.v3.8.9 + skipRange: '>=3.5.x <3.8.10' + - name: quay-operator.v3.8.11 + replaces: quay-operator.v3.8.10 + skipRange: '>=3.5.x <3.8.11' + - name: quay-operator.v3.8.12 + replaces: quay-operator.v3.8.11 + skipRange: '>=3.5.x <3.8.12' + - name: quay-operator.v3.8.2 + replaces: quay-operator.v3.8.1 + 
skipRange: '>=3.5.x <3.8.2' + - name: quay-operator.v3.8.3 + replaces: quay-operator.v3.8.2 + skipRange: '>=3.5.x <3.8.3' + - name: quay-operator.v3.8.4 + replaces: quay-operator.v3.8.3 + skipRange: '>=3.5.x <3.8.4' + - name: quay-operator.v3.8.5 + replaces: quay-operator.v3.8.4 + skipRange: '>=3.5.x <3.8.5' + - name: quay-operator.v3.8.6 + replaces: quay-operator.v3.8.5 + skipRange: '>=3.5.x <3.8.6' + - name: quay-operator.v3.8.7 + replaces: quay-operator.v3.8.6 + skipRange: '>=3.5.x <3.8.7' + - name: quay-operator.v3.8.8 + replaces: quay-operator.v3.8.7 + skipRange: '>=3.5.x <3.8.8' + - name: quay-operator.v3.8.9 + replaces: quay-operator.v3.8.8 + skipRange: '>=3.5.x <3.8.9' + name: stable-3.8 + - entries: + - name: quay-operator.v3.9.0 + skipRange: '>=3.6.x <3.9.0' + - name: quay-operator.v3.9.1 + replaces: quay-operator.v3.9.0 + skipRange: '>=3.6.x <3.9.1' + - name: quay-operator.v3.9.2 + replaces: quay-operator.v3.9.1 + skipRange: '>=3.6.x <3.9.2' + name: stable-3.9 + defaultChannel: stable-3.9 + description: "" + icon: + data: PD94bWwgdmVyc2lvbj ... + mediatype: image/svg+xml + packageName: quay-operator +status: {} +---- diff --git a/modules/olmv1-installing-an-operator.adoc b/modules/olmv1-installing-an-operator.adoc new file mode 100644 index 000000000000..8ccff3a7205a --- /dev/null +++ b/modules/olmv1-installing-an-operator.adoc @@ -0,0 +1,132 @@ +// Module included in the following assemblies: +// +// * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc + +:_content-type: PROCEDURE + +[id="olmv1-installing-an-operator_{context}"] += Installing an Operator + +You can install an Operator from a catalog by creating an Operator custom resource (CR) and applying it to the cluster. + +.Prerequisite + +* You have added a catalog to your cluster. +* You have inspected the details of an Operator to find what version you want to install. + +.Procedure + +. 
Create an Operator CR, similar to the following example: ++ +.Example `test-operator.yaml` CR +[source,yaml] +---- +apiVersion: operators.operatorframework.io/v1alpha1 +kind: Operator +metadata: + name: quay-example +spec: + packageName: quay-operator + channel: stable-3.8 + version: 3.8.12 +---- + +. Run the following command to apply the Operator CR to the cluster: ++ +[source,terminal] +---- +$ oc apply -f test-operator.yaml +---- ++ +.Example output +[source,text] +---- +operator.operators.operatorframework.io/quay-example created +---- + +.Verification + +. Run the following command to view the Operator's CR in the YAML format: ++ +[source,terminal] +---- +$ oc get operator.operators.operatorframework.io/quay-example -o yaml +---- ++ +.Example output +[source,text] +---- +apiVersion: operators.operatorframework.io/v1alpha1 +kind: Operator +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + {"apiVersion":"operators.operatorframework.io/v1alpha1","kind":"Operator","metadata":{"annotations":{},"name":"quay-example"},"spec":{"channel":"stable-3.8","packageName":"quay-operator","version":"3.8.12"}} + creationTimestamp: "2023-10-06T01:50:25Z" + generation: 1 + name: quay-example + resourceVersion: "65292" + uid: 7e782643-71c2-4355-80ff-e8cc61bd44a1 +spec: + channel: stable-3.8 + packageName: quay-operator + version: 3.8.12 +status: + conditions: + - lastTransitionTime: "2023-10-06T01:50:25Z" + message: resolved to "registry.redhat.io/quay/quay-operator-bundle@sha256:bf26c7679ea1f7b47d2b362642a9234cddb9e366a89708a4ffcbaf4475788dc7" + observedGeneration: 1 + reason: Success + status: "True" + type: Resolved + - lastTransitionTime: "2023-10-06T01:50:38Z" + message: installed from "registry.redhat.io/quay/quay-operator-bundle@sha256:bf26c7679ea1f7b47d2b362642a9234cddb9e366a89708a4ffcbaf4475788dc7" + observedGeneration: 1 + reason: Success + status: "True" + type: Installed + installedBundleResource: 
registry.redhat.io/quay/quay-operator-bundle@sha256:bf26c7679ea1f7b47d2b362642a9234cddb9e366a89708a4ffcbaf4475788dc7 + resolvedBundleResource: registry.redhat.io/quay/quay-operator-bundle@sha256:bf26c7679ea1f7b47d2b362642a9234cddb9e366a89708a4ffcbaf4475788dc7 +---- +. Run the following command to get information about your Operator's bundle: ++ +[source,terminal] +---- +$ oc get bundle +---- ++ +.Example output +[source,text] +---- +NAME TYPE PHASE AGE +quay-example-b4v8v8 image Unpacked 78s +---- + +. Run the following command to get information about your Operator's bundle deployment: ++ +[source,terminal] +---- +$ oc get bundledeployment +---- ++ +.Example output +[source,text] +---- +NAME ACTIVE BUNDLE INSTALL STATE AGE +quay-example quay-example-b4v8v8 InstallationSucceeded 117s +---- + +. Run the following command to get information about your Operator's controller manager pod: ++ +[source,terminal] +---- +$ oc get pod -n quay-operator-system +---- ++ +.Example output +[source,text] +---- +NAME READY STATUS RESTARTS AGE +quay-operator.v3.8.12-6677b5c98f-2kdtb 1/1 Running 0 2m28s +---- + diff --git a/modules/olmv1-red-hat-catalogs.adoc b/modules/olmv1-red-hat-catalogs.adoc new file mode 100644 index 000000000000..05c9ca53fb32 --- /dev/null +++ b/modules/olmv1-red-hat-catalogs.adoc @@ -0,0 +1,63 @@ +// Module included in the following assemblies: +// +// * operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc + +:_content-type: REFERENCE + +[id="olmv1-red-hat-catalogs_{context}"] += Red Hat-provided Operator catalogs in OLM 1.0 + +Red Hat-provided Operator catalogs have been released in the file-based catalog format since {product-title} 4.11. + +Operator Lifecycle Manager (OLM) 1.0 does not include Red Hat-provided Operator catalogs by default. If you want to add a Red Hat-provided catalog to your cluster, create a custom resource (CR) for the catalog and apply it to the cluster. 
The following CR examples show how to create catalog resources for OLM 1.0.
Run the following command to inspect your Operator's package contents in the catalog to find which channels and versions are available for updating: ++ +[source,terminal] +---- +$ oc get package - -o yaml +---- ++ +.Example command +[source,terminal] +---- +$ oc get package redhat-operators-quay-operator -o yaml +---- + +. Edit your Operator's CR to update the channel to `stable-3.9` and the version to `3.9.1`:, as shown in the following example: ++ +.Example `test-operator.yaml` CR +[source,yaml] +---- +apiVersion: operators.operatorframework.io/v1alpha1 +kind: Operator +metadata: + name: quay-example +spec: + packageName: quay-operator + channel: stable-3.9 <1> + version: 3.9.1 <2> +---- +<1> Update the channel to `stable-3.9`. +<2> Update the version to `3.9.1` + +. Run the following command to apply the update to the cluster: ++ +[source,terminal] +---- +$ oc apply -f test-operator.yaml +---- ++ +.Example output +[source,text] +---- +operator.operators.operatorframework.io/quay-example configured +---- ++ +[TIP] +==== +You can run the following command to patch and apply the changes to your Operator's channel and version from the CLI: ++ +[source,terminal] +---- +$ oc patch operator.operators.operatorframework.io/quay-example -p \ + '{"spec":{"channel":"stable-3.9","version":"3.9.1"}}' \ + --type=merge +---- ++ +.Example output +[source,text] +---- +operator.operators.operatorframework.io/quay-example patched +---- +==== + +.Verification + +* Run the following command verify that the channel and version updates have been applied: ++ +[source,terminal] +---- +$ oc get operator.operators.operatorframework.io/quay-example -o yaml +---- ++ +.Example output +[source,yaml] +---- +apiVersion: operators.operatorframework.io/v1alpha1 +kind: Operator +metadata: + annotations: + kubectl.kubernetes.io/last-applied-configuration: | + 
{"apiVersion":"operators.operatorframework.io/v1alpha1","kind":"Operator","metadata":{"annotations":{},"name":"quay-example"},"spec":{"channel":"stable-3.9","packageName":"quay-operator","version":"3.9.1"}} + creationTimestamp: "2023-10-06T01:50:25Z" + generation: 5 + name: quay-example + resourceVersion: "83322" + uid: 7e782643-71c2-4355-80ff-e8cc61bd44a1 +spec: + channel: stable-3.9 <1> + packageName: quay-operator + version: 3.9.1 <2> +status: + conditions: + - lastTransitionTime: "2023-10-06T02:12:30Z" + message: resolved to "registry.redhat.io/quay/quay-operator-bundle@sha256:4864bc0d5c18a84a5f19e5e664b58d3133a2ac2a309c6b5659ab553f33214b09" + observedGeneration: 5 + reason: Success + status: "True" + type: Resolved + - lastTransitionTime: "2023-10-06T02:12:30Z" + message: installed from "registry.redhat.io/quay/quay-operator-bundle@sha256:4864bc0d5c18a84a5f19e5e664b58d3133a2ac2a309c6b5659ab553f33214b09" + observedGeneration: 5 + reason: Success + status: "True" + type: Installed + installedBundleResource: registry.redhat.io/quay/quay-operator-bundle@sha256:4864bc0d5c18a84a5f19e5e664b58d3133a2ac2a309c6b5659ab553f33214b09 + resolvedBundleResource: registry.redhat.io/quay/quay-operator-bundle@sha256:4864bc0d5c18a84a5f19e5e664b58d3133a2ac2a309c6b5659ab553f33214b09 +---- +<1> Verify that the channel is updated to `stable-3.9`. +<2> Verify that the version is updated to `3.9.1`. 
diff --git a/operators/olm_v1/_attributes b/operators/olm_v1/_attributes new file mode 120000 index 000000000000..20cc1dcb77bf --- /dev/null +++ b/operators/olm_v1/_attributes @@ -0,0 +1 @@ +../../_attributes/ \ No newline at end of file diff --git a/operators/olm_v1/images b/operators/olm_v1/images new file mode 120000 index 000000000000..847b03ed0541 --- /dev/null +++ b/operators/olm_v1/images @@ -0,0 +1 @@ +../../images/ \ No newline at end of file diff --git a/operators/olm_v1/index.adoc b/operators/olm_v1/index.adoc new file mode 100644 index 000000000000..f3b8c5798493 --- /dev/null +++ b/operators/olm_v1/index.adoc @@ -0,0 +1,66 @@ +:_content-type: ASSEMBLY +[id="olmv1-about"] += About Operator Lifecycle Manager v1 (Technology Preview) +include::_attributes/common-attributes.adoc[] +:context: olmv1-about + +toc::[] + +{product-title} 4.14 introduces components for a next-generation iteration of Operator Lifecycle Manager (OLM) as a Technology Preview feature. Known during this phase as OLM v1, the updated framework evolves many of the concepts that have been part of the version of OLM included in {product-title} up to this point, referred to retroactively as OLM v0. + +:FeatureName: OLM v1 +include::snippets/technology-preview.adoc[] + +[id="olmv1-about-mission"] +== Mission statement and OLM v0 + +The mission of Operator Lifecycle Manager (OLM) has been to manage the lifecycle of cluster extensions centrally and declaratively on Kubernetes clusters. Its purpose has always been to make installing, running, and updating functional extensions to the cluster easy, safe, and reproducible for cluster administrators and platform-as-a-service (PaaS) administrators, throughout the lifecycle of the underlying cluster. + +OLM v0, which launched with {product-title} 4 and is included by default, was focused on providing unique support for these specific needs for a particular type of cluster extension, which have been coined as Operators. 
Operators are classified as one or more Kubernetes controllers, shipping with one or more API extensions (`CustomResourceDefinition` objects) to provide additional functionality to the cluster. + +[id="olmv1-about-what-changed"] +== What requirements have changed? + +After running OLM v0 in production clusters for a number of years, it became apparent that there is an appetite to deviate from this coupling of CRDs and controllers to encompass the lifecycling of extensions that are not just Operators. OLM v1 aims to address and improve upon the following requirements. + +[id="olmv1-about-dependencies"] +=== Dependencies and constraints + +OLM has been helping to define lifecycles for these extensions in which the extensions: + +* get installed, potentially causing other extensions to be installed as well as dependencies +* get customized with the help of customizable configuration at runtime +* get upgraded to newer versions following upgrade paths defined by extension developers +* and finally, get decommissioned and removed. + +In the dependency model, extensions can rely on each other for required services that are out of scope of the primary purpose of an extension, allowing each extension to focus on a specific purpose. + +OLM also prevents conflicting extensions from running on the cluster, either with conflicting dependency constraints or conflicts in ownership of services provided via APIs. Because cluster extensions must be supported with an enterprise-grade product lifecycle, there has been a growing need for allowing Operator authors to limit installation and upgrade of their extension by specifying additional environmental constraints as dependencies, primarily to align with what was tested by the Operator author's quality assurance (QA) or quality engineering (QE) processes. 
+ +In other words, there is an ever-growing ask for OLM to allow the author to enforce these support limitations in the form of additional constraints specified by Operator authors in their packaging for OLM. + +[id="olmv1-about-isolation"] +=== Tenant isolation + +During their lifecycle on the cluster, OLM also manages the permissions and capabilities extensions have on the cluster as well as the permission and access tenants on the cluster have to the extensions. This is done using the Kubernetes RBAC system, in combination with tenant isolation using Kubernetes namespaces. + +While the interaction surface of the extensions is solely composed of Kubernetes APIs the extensions define, there is an acute need to rethink the way tenant (consumers of extensions) isolation is achieved. The ask from OLM is to provide tenant isolation in a more intuitive way than is implemented in OLM v0. + +[id="olmv1-about-packaging"] +=== Improved packaging models + +OLM also defines a packaging model in which catalogs of extensions, usually containing the entire version history of each extension, are made available to clusters for cluster users to browse and select from. While these catalogs have so far been packaged and shipped as container images, there is a growing appetite to allow more ways of packaging and shipping these catalogs, besides also simplifying the building process of these catalogs, which so far have been very costly. + +The effort to bring down the cost was kicked off in OLM v0 with conversion of the underlying datastore for catalog metadata to file-based catalogs (FBCs), with more effort being invested to slim down the process in v1. Using new versions of extensions delivered with this packaging system, OLM is able to apply updates to existing running extensions on the cluster in a way where the integrity of the cluster is maintained and constraints and dependencies are kept satisfied. 
+ +[id="olmv1-about-multicluster"] +=== Multi-cluster support + +The scope of OLM's area of operation in v0 is the one cluster it is running on, with namespace-based handling of catalog access and extension API accessibility and discoverability. Expansion of this scope is indirectly expected through the work of the Kubernetes Control Plane (kcp) project. In its first incarnation, kcp will likely use its own synchronization mechanism to get OLM-managed extensions deployed eventually on one or more physical clusters from a shared, virtual control plane called a "workspace". + +While this is an area under active development and subject to change, OLM will most likely need to become aware of kcp in a future state. In OLM v1, the scope of OLM will increase to span multiple clusters following the kcp model, though likely many aspects of this will become transparent to OLM itself through the workspace abstraction that kcp provides. + +[id="olmv1-about-why-build"] +== Why build OLM v1? + +In other words, what needs to change with OLM v1 is how all of the tasks mentioned previously are carried out from the user perspective, how much control users have in the process, and which persona is involved. 
\ No newline at end of file diff --git a/operators/olm_v1/modules b/operators/olm_v1/modules new file mode 120000 index 000000000000..36719b9de743 --- /dev/null +++ b/operators/olm_v1/modules @@ -0,0 +1 @@ +../../modules/ \ No newline at end of file diff --git a/operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc b/operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc new file mode 100644 index 000000000000..785254d65267 --- /dev/null +++ b/operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc @@ -0,0 +1,55 @@ +:_content-type: ASSEMBLY +[id="olmv1-installing-an-operator-from-a-catalog"] += Installing an Operator from a catalog in OLM 1.0 (Technology Preview) +include::_attributes/common-attributes.adoc[] +:context: olmv1-installing-operator + +toc::[] + +Cluster administrators can add _catalogs_, or curated collections of Operators and Kubernetes extensions, to their clusters. Operator authors publish their products to these catalogs. When you add a catalog to your cluster, you have access to versions, patches, and over-the-air updates of the Operators and extensions that are published in the catalog. + +In the current Technology Preview release of Operator Lifecycle Manager (OLM) 1.0, you manage catalogs and Operators from the CLI using custom resources (CRs). + +:FeatureName: OLM 1.0 +include::snippets/technology-preview.adoc[] + +[id="prerequisites_olmv1-installing-an-operator-from-a-catalog"] +== Prerequisites + +* Access to an {product-title} cluster using an account with `cluster-admin` permissions +* The `TechPreviewNoUpgrade` feature set enabled on the cluster ++ +[WARNING] +==== +Enabling the `TechPreviewNoUpgrade` feature set cannot be undone and prevents minor version updates. These feature sets are not recommended on production clusters. 
+==== +* The `oc` command installed on your workstation +* `opm` CLI tool +* Docker or Podman +* Push access to a container registry, such as link:https://quay.io[Quay] + +[role="_additional-resources"] +.Additional resources + +* xref:../../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling[Enabling features using feature gates] + +include::modules/olmv1-about-catalogs.adoc[leveloffset=+1] +include::modules/olmv1-red-hat-catalogs.adoc[leveloffset=+2] + +[role="_additional-resources"] +.Additional resources +* xref:../../operators/olm_v1/olmv1-installing-an-operator-from-a-catalog.adoc#olmv1-adding-a-catalog-to-a-cluster_olmv1-installing-operator[Adding a catalog to a cluster] +* xref:../../operators/understanding/olm-rh-catalogs.adoc#olm-rh-catalogs_olm-rh-catalogs[About Red Hat-provided Operator catalogs] +* xref:../../operators/olm_v1/index.adoc#olmv1-about-packaging[Improved packaging models] + +[NOTE] +==== +The following procedures use the Red Hat Operators catalog and the Quay Operator as examples. 
+==== + +include::modules/olmv1-adding-a-catalog.adoc[leveloffset=+1] +include::modules/olmv1-finding-operators-to-install.adoc[leveloffset=+2] +include::modules/olmv1-about-target-versions.adoc[leveloffset=+1] +include::modules/olmv1-installing-an-operator.adoc[leveloffset=+2] +include::modules/olmv1-updating-an-operator.adoc[leveloffset=+2] +include::modules/olmv1-deleting-an-operator.adoc[leveloffset=+2] diff --git a/operators/olm_v1/olmv1-managing-catalogs.adoc b/operators/olm_v1/olmv1-managing-catalogs.adoc new file mode 100644 index 000000000000..9dd97672774e --- /dev/null +++ b/operators/olm_v1/olmv1-managing-catalogs.adoc @@ -0,0 +1,51 @@ +:_content-type: ASSEMBLY +[id="olmv1-managing-catalogs"] += Managing catalogs for OLM v1 (Technology Preview) +include::_attributes/common-attributes.adoc[] +:context: olmv1-managing-catalogs + +toc::[] + +In OLM v1, a _plain bundle_ is a static collection of arbitrary Kubernetes manifests in the YAML format. The experimental `olm.bundle.mediatype` property of the `olm.bundle` schema object differentiates a plain bundle (`plain+v0`) from a regular (`registry+v1`) bundle. + +:FeatureName: OLM v1 +include::snippets/technology-preview.adoc[] + +// For more information, see the [Plain Bundle Specification](https://github.com/operator-framework/rukpak/blob/main/docs/bundles/plain.md) in the RukPak repository. + +As a cluster administrator, you can build and publish a file-based catalog that includes a plain bundle image by completing the following procedures: + +. Build a plain bundle image. +. Create a file-based catalog. +. Add the plain bundle image to your file-based catalog. +. Build your catalog as an image. +. Publish your catalog image. 
+ +[role="_additional-resources"] +.Additional resources + +* xref:../../operators/olm_v1/olmv1-packaging-format.adoc#olmv1-packaging-format[RukPak component and packaging format] + +[id="prerequisites_olmv1-plain-bundles"] +== Prerequisites + +- Access to an {product-title} cluster using an account with `cluster-admin` permissions +- The `TechPreviewNoUpgrade` feature set enabled on the cluster ++ +[WARNING] +==== +Enabling the `TechPreviewNoUpgrade` feature set cannot be undone and prevents minor version updates. These feature sets are not recommended on production clusters. +==== +- The `oc` command installed on your workstation +- `opm` CLI tool +- Docker or Podman +- Push access to a container registry, such as link:https://quay.io[Quay] + +[role="_additional-resources"] +.Additional resources + +* xref:../../nodes/clusters/nodes-cluster-enabling-features.adoc#nodes-cluster-enabling[Enabling features using feature gates] + +// - Only the `redhat-operators` catalog source enabled on the cluster. This is a restriction during the Technology Preview release. + +include::modules/olmv1-catalog-plain.adoc[leveloffset=+1] \ No newline at end of file diff --git a/operators/olm_v1/olmv1-packaging-format.adoc b/operators/olm_v1/olmv1-packaging-format.adoc new file mode 100644 index 000000000000..11455ded5ec7 --- /dev/null +++ b/operators/olm_v1/olmv1-packaging-format.adoc @@ -0,0 +1,31 @@ +:_content-type: ASSEMBLY +[id="olmv1-packaging-format"] += Packaging format for OLM v1 (Technology Preview) +include::_attributes/common-attributes.adoc[] +:context: olmv1-packaging-format + +toc::[] + +OLM v1 uses the RukPak component and its resources to manage content. 
+ +:FeatureName: OLM v1 +include::snippets/technology-preview.adoc[] + +include::modules/olm-rukpak-about.adoc[leveloffset=+1] +[role="_additional-resources"] +.Additional resources + +* xref:../../operators/admin/olm-managing-po.adoc#olm-managing-po[Managing platform Operators] +* xref:../../operators/admin/olm-managing-po.adoc#olm-po-techpreview_olm-managing-po[Technology Preview restrictions for platform Operators] + +include::modules/olm-rukpak-bundle.adoc[leveloffset=+2] +include::modules/olm-rukpak-bundle-immutability.adoc[leveloffset=+3] +include::modules/olm-rukpak-plain-bundle.adoc[leveloffset=+3] +include::modules/olm-rukpak-registry-bundle.adoc[leveloffset=+3] +[role="_additional-resources"] +.Additional resources + +* xref:../../operators/understanding/olm-packaging-format.adoc#olm-bundle-format_olm-packaging-format[Legacy OLM bundle format] + +include::modules/olm-rukpak-bd.adoc[leveloffset=+2] +include::modules/olm-rukpak-provisioner.adoc[leveloffset=+2] \ No newline at end of file diff --git a/operators/olm_v1/snippets b/operators/olm_v1/snippets new file mode 120000 index 000000000000..5a3f5add140e --- /dev/null +++ b/operators/olm_v1/snippets @@ -0,0 +1 @@ +../../snippets/ \ No newline at end of file