diff --git a/_topic_map.yml b/_topic_map.yml index 6642aeda31c2..cb8d17efbd9f 100644 --- a/_topic_map.yml +++ b/_topic_map.yml @@ -81,8 +81,8 @@ Name: Release notes Dir: release_notes Distros: openshift-enterprise Topics: -- Name: OpenShift Container Platform 4.8 release notes - File: ocp-4-8-release-notes +- Name: OpenShift Container Platform 4.9 release notes + File: ocp-4-9-release-notes - Name: Versioning policy File: versioning-policy --- diff --git a/logging/cluster-logging-release-notes.adoc b/logging/cluster-logging-release-notes.adoc index 8c270ec4b494..bafda8070caa 100644 --- a/logging/cluster-logging-release-notes.adoc +++ b/logging/cluster-logging-release-notes.adoc @@ -8,7 +8,14 @@ toc::[] [id="openshift-logging-supported-versions"] == Supported versions -OpenShift Logging 5.0, 5.1, and 5.2 run on {product-title} 4.7 and 4.8. +.{product-title} version support for Red Hat OpenShift Logging (RHOL) +[options="header"] +|==== +| |4.7 |4.8 |4.9 +|RHOL 5.0|X |X | +|RHOL 5.1|X |X | +|RHOL 5.2|X |X |X +|==== include::modules/con_making-open-source-more-inclusive.adoc[leveloffset=+1] diff --git a/logging/cluster-logging-upgrading.adoc b/logging/cluster-logging-upgrading.adoc index 0ec96b4edbaa..5eec82c41acf 100644 --- a/logging/cluster-logging-upgrading.adoc +++ b/logging/cluster-logging-upgrading.adoc @@ -5,7 +5,14 @@ include::modules/common-attributes.adoc[] toc::[] -{product-title} 4.7 and 4.8 support OpenShift Logging 5.0, 5.1, and 5.2. +.{product-title} version support for Red Hat OpenShift Logging (RHOL) +[options="header"] +|==== +| |4.7 |4.8 |4.9 +|RHOL 5.0|X |X | +|RHOL 5.1|X |X | +|RHOL 5.2|X |X |X +|==== To upgrade from cluster logging in {product-title} version 4.6 and earlier to OpenShift Logging 5.x, you update the {product-title} cluster to version 4.7 or 4.8. Then, you update the following operators: diff --git a/metering/metering-upgrading-metering.adoc b/metering/metering-upgrading-metering.adoc index 373f24cf3fa8..9daab4d191d0 100644 --- a/metering/metering-upgrading-metering.adoc +++ b/metering/metering-upgrading-metering.adoc @@ -46,11 +46,11 @@ Wait several seconds to allow the subscription to update before proceeding to th ==== . Click *Operators* -> *Installed Operators*. + -The Metering Operator is shown as 4.8. For example: +The Metering Operator is shown as 4.9. For example: + ---- Metering -4.8.0-202107012112.p0 provided by Red Hat, Inc +4.9.0-202107012112.p0 provided by Red Hat, Inc ---- .Verification @@ -73,11 +73,11 @@ You can verify the metering upgrade by performing any of the following checks: $ oc get csv | grep metering ---- + -.Example output for metering upgrade from 4.7 to 4.8 +.Example output for metering upgrade from 4.8 to 4.9 [source,terminal] ---- NAME DISPLAY VERSION REPLACES PHASE -metering-operator.4.8.0-202107012112.p0 Metering 4.8.0-202107012112.p0 metering-operator.4.7.0-202007012112.p0 Succeeded +metering-operator.4.9.0-202107012112.p0 Metering 4.9.0-202107012112.p0 metering-operator.4.8.0-202007012112.p0 Succeeded ---- -- diff --git a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc index 85a5e85c9738..74ce35e52f7b 100644 --- a/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc +++ b/migrating_from_ocp_3_to_4/planning-migration-3-4.adoc @@ -8,7 +8,7 @@ toc::[] {product-title} {product-version} introduces architectural changes and enhancements. The procedures that you used to manage your {product-title} 3 cluster might not apply to {product-title} 4. 
ifndef::openshift-origin[] -For information on configuring your {product-title} 4 cluster, review the appropriate sections of the {product-title} documentation. For information on new features and other notable technical changes, review the xref:../release_notes/ocp-4-8-release-notes.adoc#ocp-4-8-release-notes[OpenShift Container Platform 4.8 release notes]. +For information on configuring your {product-title} 4 cluster, review the appropriate sections of the {product-title} documentation. For information on new features and other notable technical changes, review the xref:../release_notes/ocp-4-9-release-notes.adoc#ocp-4-9-release-notes[OpenShift Container Platform 4.9 release notes]. endif::[] It is not possible to upgrade your existing {product-title} 3 cluster to {product-title} 4. You must start with a new {product-title} 4 installation. Tools are available to assist in migrating your control plane settings and application workloads. diff --git a/modules/cluster-logging-systemd-scaling.adoc b/modules/cluster-logging-systemd-scaling.adoc index 5adbec2c6bff..760e949cb818 100644 --- a/modules/cluster-logging-systemd-scaling.adoc +++ b/modules/cluster-logging-systemd-scaling.adoc @@ -27,7 +27,7 @@ See "Creating machine configs with Butane" for information about Butane. [source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 40-worker-custom-journald labels: diff --git a/modules/cluster-logging-updating-logging-to-5-0.adoc b/modules/cluster-logging-updating-logging-to-5-0.adoc index 8a7558384781..d587368d0e03 100644 --- a/modules/cluster-logging-updating-logging-to-5-0.adoc +++ b/modules/cluster-logging-updating-logging-to-5-0.adoc @@ -7,7 +7,7 @@ * The _Cluster Logging_ Operator became the _Red Hat OpenShift Logging_ Operator. * The _Elasticsearch_ Operator became _OpenShift Elasticsearch_ Operator. -To upgrade from cluster logging in {product-title} version 4.6 and earlier to OpenShift Logging 5.x, you update the {product-title} cluster to version 4.7 or 4.8. Then, you update the following operators: +To upgrade from cluster logging in {product-title} version 4.6 and earlier to OpenShift Logging 5.x, you update the {product-title} cluster to version 4.7, 4.8, or 4.9. Then, you update the following operators: * From Elasticsearch Operator 4.x to OpenShift Elasticsearch Operator 5.x * From Cluster Logging Operator 4.x to Red Hat OpenShift Logging Operator 5.x diff --git a/modules/cluster-node-tuning-operator-verify-profiles.adoc b/modules/cluster-node-tuning-operator-verify-profiles.adoc index a35733483e06..e46e406b9f89 100644 --- a/modules/cluster-node-tuning-operator-verify-profiles.adoc +++ b/modules/cluster-node-tuning-operator-verify-profiles.adoc @@ -5,7 +5,7 @@ [id="verifying-tuned-profiles-are-applied_{context}"] = Verifying that the TuneD profiles are applied -Starting with {product-title} 4.8, it is no longer necessary to check the TuneD pod logs +It is no longer necessary to check the TuneD pod logs to find which TuneD profiles are applied on cluster nodes. 
[source,terminal] diff --git a/modules/cnf-configure_for_irq_dynamic_load_balancing.adoc b/modules/cnf-configure_for_irq_dynamic_load_balancing.adoc index 65d080129c6c..be7143bbb891 100644 --- a/modules/cnf-configure_for_irq_dynamic_load_balancing.adoc +++ b/modules/cnf-configure_for_irq_dynamic_load_balancing.adoc @@ -44,7 +44,7 @@ metadata: spec: containers: - name: dynamic-irq-pod - image: "quay.io/openshift-kni/cnf-tests:4.8" + image: "quay.io/openshift-kni/cnf-tests:4.9" command: ["sleep", "10h"] resources: requests: diff --git a/modules/cnf-installing-the-performance-addon-operator.adoc b/modules/cnf-installing-the-performance-addon-operator.adoc index 0ede0da12544..d88954382587 100644 --- a/modules/cnf-installing-the-performance-addon-operator.adoc +++ b/modules/cnf-installing-the-performance-addon-operator.adoc @@ -75,7 +75,7 @@ $ oc get packagemanifest performance-addon-operator -n openshift-marketplace -o .Example output [source,terminal] ---- -4.8 +4.9 ---- .. Create the following Subscription CR and save the YAML in the `pao-sub.yaml` file: diff --git a/modules/cnf-performing-end-to-end-tests-for-platform-verification.adoc b/modules/cnf-performing-end-to-end-tests-for-platform-verification.adoc index 03dd2967f58c..9e057441c2ae 100644 --- a/modules/cnf-performing-end-to-end-tests-for-platform-verification.adoc +++ b/modules/cnf-performing-end-to-end-tests-for-platform-verification.adoc @@ -68,7 +68,7 @@ You can use the `ROLE_WORKER_CNF` variable to override the worker pool name: [source,terminal] ---- $ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig -e -ROLE_WORKER_CNF=custom-worker-pool registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/test-run.sh +ROLE_WORKER_CNF=custom-worker-pool registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/test-run.sh ---- + [NOTE] @@ -83,7 +83,7 @@ Use this command to run in dry-run mode. This is useful for checking what is in [source,terminal] ---- -$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/test-run.sh -ginkgo.dryRun -ginkgo.v +$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/test-run.sh -ginkgo.dryRun -ginkgo.v ---- [id="cnf-performing-end-to-end-tests-disconnected-mode_{context}"] @@ -104,7 +104,7 @@ Run this command from an intermediate machine that has access both to the cluste [source,terminal] ---- -$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/mirror -registry my.local.registry:5000/ | oc image mirror -f - +$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/mirror -registry my.local.registry:5000/ | oc image mirror -f - ---- Then, follow the instructions in the following section about overriding the registry used to fetch the images. 
@@ -116,7 +116,7 @@ This is done by setting the `IMAGE_REGISTRY` environment variable: [source,terminal] ---- -$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig -e IMAGE_REGISTRY="my.local.registry:5000/" -e CNF_TESTS_IMAGE="custom-cnf-tests-image:latests" registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/test-run.sh +$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig -e IMAGE_REGISTRY="my.local.registry:5000/" -e CNF_TESTS_IMAGE="custom-cnf-tests-image:latests" registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/test-run.sh ---- [id="cnf-performing-end-to-end-tests-mirroring-to-cluster-internal-registry_{context}"] @@ -213,7 +213,7 @@ echo "{\"auths\": { \"$REGISTRY\": { \"auth\": $TOKEN } }}" > dockerauth.json + [source,terminal] ---- -$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/mirror -registry $REGISTRY/cnftests | oc image mirror --insecure=true -a=$(pwd)/dockerauth.json -f - +$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/mirror -registry $REGISTRY/cnftests | oc image mirror --insecure=true -a=$(pwd)/dockerauth.json -f - ---- . Run the tests: @@ -235,11 +235,11 @@ $ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig -e IMAG [ { "registry": "public.registry.io:5000", - "image": "imageforcnftests:4.8" + "image": "imageforcnftests:4.9" }, { "registry": "public.registry.io:5000", - "image": "imagefordpdk:4.8" + "image": "imagefordpdk:4.9" } ] ---- @@ -248,7 +248,7 @@ $ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig -e IMAG + [source,terminal] ---- -$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/mirror --registry "my.local.registry:5000/" --images "/kubeconfig/images.json" | oc image mirror -f - +$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/mirror --registry "my.local.registry:5000/" --images "/kubeconfig/images.json" | oc image mirror -f - ---- [id="cnf-performing-end-to-end-tests-running-in-single-node-cluster_{context}"] @@ -342,7 +342,7 @@ For example, to change the `CNF_TESTS_IMAGE` with a custom registry run the foll [source,terminal] ---- -$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig -e CNF_TESTS_IMAGE="custom-cnf-tests-image:latests" registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/test-run.sh +$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig -e CNF_TESTS_IMAGE="custom-cnf-tests-image:latests" registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/test-run.sh ---- [id="cnf-performing-end-to-end-tests-ginko-parameters_{context}"] @@ -354,7 +354,7 @@ You can use the `-ginkgo.focus` parameter to filter a set of tests: [source,terminal] ---- -$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/test-run.sh -ginkgo.focus="performance|sctp" +$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/test-run.sh -ginkgo.focus="performance|sctp" ---- You can run only the latency test using the `-ginkgo.focus` parameter. 
@@ -363,7 +363,7 @@ To run only the latency test, you must provide the `-ginkgo.focus` parameter and [source,terminal] ---- -$ docker run --rm -v $KUBECONFIG:/kubeconfig -e KUBECONFIG=/kubeconfig -e LATENCY_TEST_RUN=true -e LATENCY_TEST_RUNTIME=600 -e OSLAT_MAXIMUM_LATENCY=20 -e PERF_TEST_PROFILE= registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/test-run.sh -ginkgo.focus="\[performance\]\[config\]|\[performance\]\ Latency\ Test" +$ docker run --rm -v $KUBECONFIG:/kubeconfig -e KUBECONFIG=/kubeconfig -e LATENCY_TEST_RUN=true -e LATENCY_TEST_RUNTIME=600 -e OSLAT_MAXIMUM_LATENCY=20 -e PERF_TEST_PROFILE= registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/test-run.sh -ginkgo.focus="\[performance\]\[config\]|\[performance\]\ Latency\ Test" ---- [NOTE] @@ -525,7 +525,7 @@ Assuming the `kubeconfig` file is in the current folder, the command for running [source,terminal] ---- -$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/test-run.sh +$ docker run -v $(pwd)/:/kubeconfig -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/test-run.sh ---- This allows your `kubeconfig` file to be consumed from inside the running container. @@ -844,7 +844,7 @@ A JUnit-compliant XML is produced by passing the `--junit` parameter together wi [source,terminal] ---- -$ docker run -v $(pwd)/:/kubeconfig -v $(pwd)/junitdest:/path/to/junit -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/test-run.sh --junit /path/to/junit +$ docker run -v $(pwd)/:/kubeconfig -v $(pwd)/junitdest:/path/to/junit -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/test-run.sh --junit /path/to/junit ---- [id="cnf-performing-end-to-end-tests-test-failure-report_{context}"] @@ -854,7 +854,7 @@ A report with information about the cluster state and resources for troubleshoot [source,terminal] ---- -$ docker run -v $(pwd)/:/kubeconfig -v $(pwd)/reportdest:/path/to/report -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/test-run.sh --report /path/to/report +$ docker run -v $(pwd)/:/kubeconfig -v $(pwd)/reportdest:/path/to/report -e KUBECONFIG=/kubeconfig/kubeconfig registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/test-run.sh --report /path/to/report ---- [id="cnf-performing-end-to-end-tests-podman_{context}"] @@ -915,5 +915,5 @@ To override the performance profile, the manifest must be mounted inside the con [source,termal] ---- -$ docker run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig -e PERFORMANCE_PROFILE_MANIFEST_OVERRIDE=/kubeconfig/manifest.yaml registry.redhat.io/openshift4/cnf-tests-rhel8:v4.8 /usr/bin/test-run.sh +$ docker run -v $(pwd)/:/kubeconfig:Z -e KUBECONFIG=/kubeconfig/kubeconfig -e PERFORMANCE_PROFILE_MANIFEST_OVERRIDE=/kubeconfig/manifest.yaml registry.redhat.io/openshift4/cnf-tests-rhel8:v4.9 /usr/bin/test-run.sh ---- diff --git a/modules/cnf-provisioning-real-time-and-low-latency-workloads.adoc b/modules/cnf-provisioning-real-time-and-low-latency-workloads.adoc index 0cccf8a51161..6bd6b9454cbd 100644 --- a/modules/cnf-provisioning-real-time-and-low-latency-workloads.adoc +++ b/modules/cnf-provisioning-real-time-and-low-latency-workloads.adoc @@ -89,7 +89,7 @@ EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME cnf-worker-0.example.com Ready worker,worker-rt 5d17h v1.22.1 128.66.135.107 Red Hat Enterprise Linux 
CoreOS 46.82.202008252340-0 (Ootpa) -4.18.0-211.rt5.23.el8.x86_64 cri-o://1.22.1-90.rhaos4.8.git4a0ac05.el8-rc.1 +4.18.0-211.rt5.23.el8.x86_64 cri-o://1.22.1-90.rhaos4.9.git4a0ac05.el8-rc.1 [...] ---- diff --git a/modules/cnf-running-the-performance-creator-profile-offline.adoc b/modules/cnf-running-the-performance-creator-profile-offline.adoc index fe096570bbd8..2831f0af0e56 100644 --- a/modules/cnf-running-the-performance-creator-profile-offline.adoc +++ b/modules/cnf-running-the-performance-creator-profile-offline.adoc @@ -34,7 +34,7 @@ readonly IMG_EXISTS_CMD="${CONTAINER_RUNTIME} image exists" readonly IMG_PULL_CMD="${CONTAINER_RUNTIME} image pull" readonly MUST_GATHER_VOL="/must-gather" -PAO_IMG="quay.io/openshift-kni/performance-addon-operator:4.8-snapshot" +PAO_IMG="quay.io/openshift-kni/performance-addon-operator:4.9-snapshot" MG_TARBALL="" DATA_DIR="" @@ -162,7 +162,7 @@ There two types of arguments: * PPC arguments ==== + -<1> Optional: Specify the Performance Addon Operator image. If not set, the default upstream image is used: `quay.io/openshift-kni/performance-addon-operator:4.8-snapshot`. +<1> Optional: Specify the Performance Addon Operator image. If not set, the default upstream image is used: `quay.io/openshift-kni/performance-addon-operator:4.9-snapshot`. <2> `-t` is a required wrapper script argument and specifies the path to a `must-gather` tarball. . Run the performance profile creator tool in discovery mode: diff --git a/modules/cnf-running-the-performance-creator-profile.adoc b/modules/cnf-running-the-performance-creator-profile.adoc index b20049e18d3a..86e086201f78 100644 --- a/modules/cnf-running-the-performance-creator-profile.adoc +++ b/modules/cnf-running-the-performance-creator-profile.adoc @@ -36,7 +36,7 @@ worker-cnf rendered-worker-cnf-1d871ac76e1951d32b2fe92369879826 False Tr + [source,terminal] ---- -$ podman run --entrypoint performance-profile-creator quay.io/openshift-kni/performance-addon-operator:4.8-snapshot -h +$ podman run --entrypoint performance-profile-creator quay.io/openshift-kni/performance-addon-operator:4.9-snapshot -h ---- + .Example output @@ -77,7 +77,7 @@ Using this information you can set appropriate values for some of the arguments + [source,terminal] ---- -$ podman run --entrypoint performance-profile-creator -v /must-gather:/must-gather:z quay.io/openshift-kni/performance-addon-operator:4.8-snapshot --info log --must-gather-dir-path /must-gather +$ podman run --entrypoint performance-profile-creator -v /must-gather:/must-gather:z quay.io/openshift-kni/performance-addon-operator:4.9-snapshot --info log --must-gather-dir-path /must-gather ---- + [NOTE] @@ -96,7 +96,7 @@ The `info` option requires a value which specifies the output format. 
Possible v + [source,terminal] ---- -$ podman run --entrypoint performance-profile-creator -v /must-gather:/must-gather:z quay.io/openshift-kni/performance-addon-operator:4.8-snapshot --mcp-name=worker-cnf --reserved-cpu-count=20 --rt-kernel=true --split-reserved-cpus-across-numa=false --topology-manager-policy=single-numa-node --must-gather-dir-path /must-gather --power-consumption-mode=ultra-low-latency > my-performance-profile.yaml +$ podman run --entrypoint performance-profile-creator -v /must-gather:/must-gather:z quay.io/openshift-kni/performance-addon-operator:4.9-snapshot --mcp-name=worker-cnf --reserved-cpu-count=20 --rt-kernel=true --split-reserved-cpus-across-numa=false --topology-manager-policy=single-numa-node --must-gather-dir-path /must-gather --power-consumption-mode=ultra-low-latency > my-performance-profile.yaml ---- + [NOTE] diff --git a/modules/cnf-upgrading-performance-addon-operator.adoc b/modules/cnf-upgrading-performance-addon-operator.adoc index b0ca4a44d464..234d7fa154af 100644 --- a/modules/cnf-upgrading-performance-addon-operator.adoc +++ b/modules/cnf-upgrading-performance-addon-operator.adoc @@ -115,8 +115,8 @@ $ oc get csv [source,terminal] ---- VERSION REPLACES PHASE -4.8.0 performance-addon-operator.v4.8.0 Installing -4.7.0 Replacing +4.9.0 performance-addon-operator.v4.9.0 Installing +4.8.0 Replacing ---- . Run `get csv` again to verify the output: @@ -130,5 +130,5 @@ VERSION REPLACES PHASE [source,terminal] ---- NAME DISPLAY VERSION REPLACES PHASE -performance-addon-operator.v4.8.0 Performance Addon Operator 4.8.0 performance-addon-operator.v4.7.0 Succeeded +performance-addon-operator.v4.9.0 Performance Addon Operator 4.9.0 performance-addon-operator.v4.8.0 Succeeded ---- diff --git a/modules/common-attributes.adoc b/modules/common-attributes.adoc index 35664ce4e74c..58a184781869 100644 --- a/modules/common-attributes.adoc +++ b/modules/common-attributes.adoc @@ -1,4 +1,4 @@ -// The {product-title} attribute provides the context-sensitive name of the relevant OpenShift distribution, for example, "OpenShift Container Platform" or "OKD". The {product-version} attribute provides the product version relative to the distribution, for example "4.8". +// The {product-title} attribute provides the context-sensitive name of the relevant OpenShift distribution, for example, "OpenShift Container Platform" or "OKD". The {product-version} attribute provides the product version relative to the distribution, for example "4.9". // {product-title} and {product-version} are parsed when AsciiBinder queries the _distro_map.yml file in relation to the base branch of a pull request. // See https://github.com/openshift/openshift-docs/blob/main/contributing_to_docs/doc_guidelines.adoc#product-name-version for more information on this topic. // Other common attributes are defined in the following lines: diff --git a/modules/containers-signature-verify-enable.adoc b/modules/containers-signature-verify-enable.adoc index aade95a16888..faeb7540fc67 100644 --- a/modules/containers-signature-verify-enable.adoc +++ b/modules/containers-signature-verify-enable.adoc @@ -17,7 +17,7 @@ See "Creating machine configs with Butane" for information about Butane. 
[source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 51-worker-rh-registry-trust labels: diff --git a/modules/developer-cli-odo-installing-odo-on-linux.adoc b/modules/developer-cli-odo-installing-odo-on-linux.adoc index 4acb6f201f7f..b3bd63e9c460 100644 --- a/modules/developer-cli-odo-installing-odo-on-linux.adoc +++ b/modules/developer-cli-odo-installing-odo-on-linux.adoc @@ -78,7 +78,7 @@ + [source,terminal] ---- -# subscription-manager repos --enable="ocp-tools-4.8-for-rhel-8-x86_64-rpms" +# subscription-manager repos --enable="ocp-tools-4.9-for-rhel-8-x86_64-rpms" ---- . Install the `{odo-title}` package: diff --git a/modules/gathering-data-specific-features.adoc b/modules/gathering-data-specific-features.adoc index 11bae20c9cd3..c4f7b5e58b25 100644 --- a/modules/gathering-data-specific-features.adoc +++ b/modules/gathering-data-specific-features.adoc @@ -38,7 +38,7 @@ ifndef::openshift-origin[] |`registry.redhat.io/rhmtc/openshift-migration-must-gather-rhel8:v{mtc-version}` |Data collection for the {mtc-full}. -|`registry.redhat.io/ocs4/ocs-must-gather-rhel8:v4.8` +|`registry.redhat.io/ocs4/ocs-must-gather-rhel8:v4.9` |Data collection for Red Hat OpenShift Container Storage. |`registry.redhat.io/openshift4/ose-cluster-logging-operator` diff --git a/modules/gitops-release-notes-1-2.adoc b/modules/gitops-release-notes-1-2.adoc index 115a5cf0b90c..ed927ff81768 100644 --- a/modules/gitops-release-notes-1-2.adoc +++ b/modules/gitops-release-notes-1-2.adoc @@ -5,7 +5,7 @@ [id="gitops-release-notes-1-2_{context}"] = Release notes for {gitops-title} 1.2 -{gitops-title} 1.2 is now available on {product-title} 4.7 and 4.8. +{gitops-title} 1.2 is now available on {product-title} 4.7, 4.8, and 4.9. [id="support-matrix-1-2_{context}"] == Support matrix @@ -60,7 +60,7 @@ spec: route: enabled: true ---- -* You can now define hostnames using route labels to support router sharding. Support for setting labels on the `server` (argocd server), `grafana`, and `prometheus` routes is now available. To set labels on a route, add `labels` under the route configuration for a server in the `ArgoCD` CR. +* You can now define hostnames using route labels to support router sharding. Support for setting labels on the `server` (argocd server), `grafana`, and `prometheus` routes is now available. To set labels on a route, add `labels` under the route configuration for a server in the `ArgoCD` CR. + .Example `ArgoCD` CR YAML to set labels on argocd server [source,yaml] @@ -75,11 +75,11 @@ spec: server: route: enabled: true - labels: + labels: key1: value1 key2: value2 ---- -* The GitOps Operator now automatically grants permissions to Argo CD instances to manage resources in target namespaces by applying labels. Users can label the target namespace with the label `argocd.argoproj.io/managed-by: `, where the `source-namespace` is the namespace where the argocd instance is deployed. +* The GitOps Operator now automatically grants permissions to Argo CD instances to manage resources in target namespaces by applying labels. Users can label the target namespace with the label `argocd.argoproj.io/managed-by: `, where the `source-namespace` is the namespace where the argocd instance is deployed. [id="fixed-issues-1-2_{context}"] == Fixed issues @@ -100,7 +100,7 @@ Current Resource Quota for `openshift-gitops` namespace. 
+ [cols="1,1,1",options="header"] |=== -| *Resource* | *Requests* | *Limits* +| *Resource* | *Requests* | *Limits* | CPU | 6688m @@ -111,7 +111,7 @@ Current Resource Quota for `openshift-gitops` namespace. | 9070Mi |=== -+ ++ You can use the below command to update the CPU limits. + [source,terminal] @@ -125,9 +125,5 @@ You can use the below command to update the CPU requests. ---- $ oc patch resourcequota openshift-gitops-compute-resources -n openshift-gitops --type='json' -p='[{"op": "replace", "path": "/spec/hard/cpu", "value":"7000m"}] ---- -+ ++ You can replace the path in the above commands from `cpu` to `memory` to update the memory. - - - - diff --git a/modules/graceful-restart.adoc b/modules/graceful-restart.adoc index 920f433cf088..7b0dcf9d53af 100644 --- a/modules/graceful-restart.adoc +++ b/modules/graceful-restart.adoc @@ -118,14 +118,14 @@ Check that there are no cluster Operators with the `DEGRADED` condition set to ` [source,terminal] ---- NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication 4.8.0 True False False 59m -cloud-credential 4.8.0 True False False 85m -cluster-autoscaler 4.8.0 True False False 73m -config-operator 4.8.0 True False False 73m -console 4.8.0 True False False 62m -csi-snapshot-controller 4.8.0 True False False 66m -dns 4.8.0 True False False 76m -etcd 4.8.0 True False False 76m +authentication 4.9.0 True False False 59m +cloud-credential 4.9.0 True False False 85m +cluster-autoscaler 4.9.0 True False False 73m +config-operator 4.9.0 True False False 73m +console 4.9.0 True False False 62m +csi-snapshot-controller 4.9.0 True False False 66m +dns 4.9.0 True False False 76m +etcd 4.9.0 True False False 76m ... ---- diff --git a/modules/insights-operator-one-time-gather.adoc b/modules/insights-operator-one-time-gather.adoc index f54ef2d01094..7b2fa6ef8db3 100644 --- a/modules/insights-operator-one-time-gather.adoc +++ b/modules/insights-operator-one-time-gather.adoc @@ -5,7 +5,7 @@ [id="insights-operator-one-time-gather_{context}"] -= Running an Insights Operator gather operation += Running an Insights Operator gather operation You must run a gather operation to create an Insights Operator archive. @@ -19,7 +19,7 @@ You must run a gather operation to create an Insights Operator archive. + [source,yaml] ---- -include::https://raw.githubusercontent.com/openshift/insights-operator/release-4.8/docs/gather-job.yaml[] +include::https://raw.githubusercontent.com/openshift/insights-operator/release-4.9/docs/gather-job.yaml[] ---- . Copy your `insights-operator` image version: + @@ -56,9 +56,9 @@ $ oc describe -n openshift-insights job/insights-operator-job Events: Type Reason Age From Message ---- ------ ---- ---- ------- - Normal SuccessfulCreate 7m18s job-controller Created pod: insights-operator-job-__ + Normal SuccessfulCreate 7m18s job-controller Created pod: insights-operator-job-__ ---- -where `insights-operator-job-__` is the name of the pod. +where `insights-operator-job-__` is the name of the pod. . Verify that the operation has finished: + @@ -70,7 +70,7 @@ $ oc logs -n openshift-insights insights-operator-job-__ insights-oper .Example output [source,terminal] ---- -I0407 11:55:38.192084 1 diskrecorder.go:34] Wrote 108 records to disk in 33ms +I0407 11:55:38.192084 1 diskrecorder.go:34] Wrote 108 records to disk in 33ms ---- . 
Save the created archive: + @@ -84,4 +84,3 @@ $ oc cp openshift-insights/insights-operator-job-__:/var/lib/insights- ---- $ oc delete -n openshift-insights job insights-operator-job ---- - diff --git a/modules/installation-aws-upload-custom-rhcos-ami.adoc b/modules/installation-aws-upload-custom-rhcos-ami.adoc index cc076ceb69d0..b1c92d47ecff 100644 --- a/modules/installation-aws-upload-custom-rhcos-ami.adoc +++ b/modules/installation-aws-upload-custom-rhcos-ami.adoc @@ -56,7 +56,7 @@ variable: ---- $ export RHCOS_VERSION= <1> ---- -<1> The {op-system} VMDK version, like `4.8.0`. +<1> The {op-system} VMDK version, like `4.9.0`. . Export the Amazon S3 bucket name as an environment variable: + diff --git a/modules/installation-complete-user-infra.adoc b/modules/installation-complete-user-infra.adoc index 6bcf2c706751..60522f3991f9 100644 --- a/modules/installation-complete-user-infra.adoc +++ b/modules/installation-complete-user-infra.adoc @@ -64,37 +64,37 @@ $ watch -n5 oc get clusteroperators [source,terminal] ---- NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication 4.8.2 True False False 19m -baremetal 4.8.2 True False False 37m -cloud-credential 4.8.2 True False False 40m -cluster-autoscaler 4.8.2 True False False 37m -config-operator 4.8.2 True False False 38m -console 4.8.2 True False False 26m -csi-snapshot-controller 4.8.2 True False False 37m -dns 4.8.2 True False False 37m -etcd 4.8.2 True False False 36m -image-registry 4.8.2 True False False 31m -ingress 4.8.2 True False False 30m -insights 4.8.2 True False False 31m -kube-apiserver 4.8.2 True False False 26m -kube-controller-manager 4.8.2 True False False 36m -kube-scheduler 4.8.2 True False False 36m -kube-storage-version-migrator 4.8.2 True False False 37m -machine-api 4.8.2 True False False 29m -machine-approver 4.8.2 True False False 37m -machine-config 4.8.2 True False False 36m -marketplace 4.8.2 True False False 37m -monitoring 4.8.2 True False False 29m -network 4.8.2 True False False 38m -node-tuning 4.8.2 True False False 37m -openshift-apiserver 4.8.2 True False False 32m -openshift-controller-manager 4.8.2 True False False 30m -openshift-samples 4.8.2 True False False 32m -operator-lifecycle-manager 4.8.2 True False False 37m -operator-lifecycle-manager-catalog 4.8.2 True False False 37m -operator-lifecycle-manager-packageserver 4.8.2 True False False 32m -service-ca 4.8.2 True False False 38m -storage 4.8.2 True False False 37m +authentication 4.9.0 True False False 19m +baremetal 4.9.0 True False False 37m +cloud-credential 4.9.0 True False False 40m +cluster-autoscaler 4.9.0 True False False 37m +config-operator 4.9.0 True False False 38m +console 4.9.0 True False False 26m +csi-snapshot-controller 4.9.0 True False False 37m +dns 4.9.0 True False False 37m +etcd 4.9.0 True False False 36m +image-registry 4.9.0 True False False 31m +ingress 4.9.0 True False False 30m +insights 4.9.0 True False False 31m +kube-apiserver 4.9.0 True False False 26m +kube-controller-manager 4.9.0 True False False 36m +kube-scheduler 4.9.0 True False False 36m +kube-storage-version-migrator 4.9.0 True False False 37m +machine-api 4.9.0 True False False 29m +machine-approver 4.9.0 True False False 37m +machine-config 4.9.0 True False False 36m +marketplace 4.9.0 True False False 37m +monitoring 4.9.0 True False False 29m +network 4.9.0 True False False 38m +node-tuning 4.9.0 True False False 37m +openshift-apiserver 4.9.0 True False False 32m +openshift-controller-manager 4.9.0 True False False 30m +openshift-samples 4.9.0 
True False False 32m +operator-lifecycle-manager 4.9.0 True False False 37m +operator-lifecycle-manager-catalog 4.9.0 True False False 37m +operator-lifecycle-manager-packageserver 4.9.0 True False False 32m +service-ca 4.9.0 True False False 38m +storage 4.9.0 True False False 37m ---- + Alternatively, the following command notifies you when all of the clusters are available. It also retrieves and displays credentials: diff --git a/modules/installation-operators-config.adoc b/modules/installation-operators-config.adoc index 6487586a2445..4bf6d797cf96 100644 --- a/modules/installation-operators-config.adoc +++ b/modules/installation-operators-config.adoc @@ -36,36 +36,36 @@ $ watch -n5 oc get clusteroperators [source,terminal] ---- NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication 4.8.2 True False False 19m -baremetal 4.8.2 True False False 37m -cloud-credential 4.8.2 True False False 40m -cluster-autoscaler 4.8.2 True False False 37m -config-operator 4.8.2 True False False 38m -console 4.8.2 True False False 26m -csi-snapshot-controller 4.8.2 True False False 37m -dns 4.8.2 True False False 37m -etcd 4.8.2 True False False 36m -image-registry 4.8.2 True False False 31m -ingress 4.8.2 True False False 30m -insights 4.8.2 True False False 31m -kube-apiserver 4.8.2 True False False 26m -kube-controller-manager 4.8.2 True False False 36m -kube-scheduler 4.8.2 True False False 36m -kube-storage-version-migrator 4.8.2 True False False 37m -machine-api 4.8.2 True False False 29m -machine-approver 4.8.2 True False False 37m -machine-config 4.8.2 True False False 36m -marketplace 4.8.2 True False False 37m -monitoring 4.8.2 True False False 29m -network 4.8.2 True False False 38m -node-tuning 4.8.2 True False False 37m -openshift-apiserver 4.8.2 True False False 32m -openshift-controller-manager 4.8.2 True False False 30m -openshift-samples 4.8.2 True False False 32m -operator-lifecycle-manager 4.8.2 True False False 37m -operator-lifecycle-manager-catalog 4.8.2 True False False 37m -operator-lifecycle-manager-packageserver 4.8.2 True False False 32m -service-ca 4.8.2 True False False 38m -storage 4.8.2 True False False 37m +authentication 4.9.0 True False False 19m +baremetal 4.9.0 True False False 37m +cloud-credential 4.9.0 True False False 40m +cluster-autoscaler 4.9.0 True False False 37m +config-operator 4.9.0 True False False 38m +console 4.9.0 True False False 26m +csi-snapshot-controller 4.9.0 True False False 37m +dns 4.9.0 True False False 37m +etcd 4.9.0 True False False 36m +image-registry 4.9.0 True False False 31m +ingress 4.9.0 True False False 30m +insights 4.9.0 True False False 31m +kube-apiserver 4.9.0 True False False 26m +kube-controller-manager 4.9.0 True False False 36m +kube-scheduler 4.9.0 True False False 36m +kube-storage-version-migrator 4.9.0 True False False 37m +machine-api 4.9.0 True False False 29m +machine-approver 4.9.0 True False False 37m +machine-config 4.9.0 True False False 36m +marketplace 4.9.0 True False False 37m +monitoring 4.9.0 True False False 29m +network 4.9.0 True False False 38m +node-tuning 4.9.0 True False False 37m +openshift-apiserver 4.9.0 True False False 32m +openshift-controller-manager 4.9.0 True False False 30m +openshift-samples 4.9.0 True False False 32m +operator-lifecycle-manager 4.9.0 True False False 37m +operator-lifecycle-manager-catalog 4.9.0 True False False 37m +operator-lifecycle-manager-packageserver 4.9.0 True False False 32m +service-ca 4.9.0 True False False 38m +storage 4.9.0 True False False 37m ---- . 
Configure the Operators that are not available. diff --git a/modules/installation-special-config-butane-create.adoc b/modules/installation-special-config-butane-create.adoc index c65a6e297743..0010d4e4cf7c 100644 --- a/modules/installation-special-config-butane-create.adoc +++ b/modules/installation-special-config-butane-create.adoc @@ -18,7 +18,7 @@ You can use Butane to produce a `MachineConfig` object so that you can configure [source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 99-worker-custom labels: diff --git a/modules/installation-special-config-chrony.adoc b/modules/installation-special-config-chrony.adoc index de41fe4c27dd..b3806e4da9f9 100644 --- a/modules/installation-special-config-chrony.adoc +++ b/modules/installation-special-config-chrony.adoc @@ -37,7 +37,7 @@ See "Creating machine configs with Butane" for information about Butane. [source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 99-worker-chrony <1> labels: diff --git a/modules/installation-special-config-kmod.adoc b/modules/installation-special-config-kmod.adoc index c3cfc55dfbde..bf80ab7cb760 100644 --- a/modules/installation-special-config-kmod.adoc +++ b/modules/installation-special-config-kmod.adoc @@ -387,7 +387,7 @@ See "Creating machine configs with Butane" for information about Butane. [source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 99-simple-kmod labels: diff --git a/modules/installation-special-config-raid.adoc b/modules/installation-special-config-raid.adoc index b066cd20adb7..6a5e40171202 100644 --- a/modules/installation-special-config-raid.adoc +++ b/modules/installation-special-config-raid.adoc @@ -15,7 +15,7 @@ You can enable software RAID partitioning to provide an external data volume. 
{p [source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: raid1-storage labels: @@ -58,7 +58,7 @@ storage: [source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: raid1-alt-storage labels: diff --git a/modules/installation-special-config-storage.adoc b/modules/installation-special-config-storage.adoc index 3fe86bed4021..cc62516f0694 100644 --- a/modules/installation-special-config-storage.adoc +++ b/modules/installation-special-config-storage.adoc @@ -47,7 +47,7 @@ You can use the `threshold` attribute in your Butane configuration to define the [source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: worker-storage labels: @@ -175,7 +175,7 @@ $ ./openshift-install create manifests --dir= <1> .Butane config example for a boot device ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: worker-storage <1> labels: diff --git a/modules/installing-cluster-loader.adoc b/modules/installing-cluster-loader.adoc index 4d02609f462d..d6fad16a3671 100644 --- a/modules/installing-cluster-loader.adoc +++ b/modules/installing-cluster-loader.adoc @@ -11,5 +11,5 @@ + [source,terminal] ---- -$ podman pull quay.io/openshift/origin-tests:4.8 +$ podman pull quay.io/openshift/origin-tests:4.9 ---- diff --git a/modules/investigating-kernel-crashes.adoc b/modules/investigating-kernel-crashes.adoc index 9bee2eba0060..851c22f43199 100644 --- a/modules/investigating-kernel-crashes.adoc +++ b/modules/investigating-kernel-crashes.adoc @@ -74,7 +74,7 @@ Create a `MachineConfig` object for cluster-wide configuration: [source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 99-worker-kdump <1> labels: diff --git a/modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc b/modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc index 38369cdccad5..70921c1a8a18 100644 --- a/modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc +++ b/modules/ipi-install-configuring-ntp-for-disconnected-clusters.adoc @@ -34,7 +34,7 @@ See "Creating machine configs with Butane" for information about Butane. 
.Butane config example ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 99-master-chrony-conf-override labels: @@ -92,7 +92,7 @@ $ butane 99-master-chrony-conf-override.bu -o 99-master-chrony-conf-override.yam .Butane config example ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 99-worker-chrony-conf-override labels: diff --git a/modules/ipi-install-node-requirements.adoc b/modules/ipi-install-node-requirements.adoc index 72af95001b4a..faa8a8fc3368 100644 --- a/modules/ipi-install-node-requirements.adoc +++ b/modules/ipi-install-node-requirements.adoc @@ -2,6 +2,7 @@ // // * installing/installing_bare_metal_ipi/ipi-install-prerequisites.adoc :product-version: 4.8 + [id="node-requirements_{context}"'"] = Node requirements diff --git a/modules/ipi-install-retrieving-the-openshift-installer.adoc b/modules/ipi-install-retrieving-the-openshift-installer.adoc index 9bcdafd17ab8..4d3048fdb267 100644 --- a/modules/ipi-install-retrieving-the-openshift-installer.adoc +++ b/modules/ipi-install-retrieving-the-openshift-installer.adoc @@ -11,6 +11,6 @@ available version of {product-title}: [source,terminal] ---- -$ export VERSION=latest-4.8 +$ export VERSION=latest-4.9 export RELEASE_IMAGE=$(curl -s https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$VERSION/release.txt | grep 'Pull From: quay.io' | awk -F ' ' '{print $3}') ---- diff --git a/modules/ipi-install-troubleshooting-ntp-out-of-sync.adoc b/modules/ipi-install-troubleshooting-ntp-out-of-sync.adoc index 78265b6d883d..e543aad74d41 100644 --- a/modules/ipi-install-troubleshooting-ntp-out-of-sync.adoc +++ b/modules/ipi-install-troubleshooting-ntp-out-of-sync.adoc @@ -65,7 +65,7 @@ See "Creating machine configs with Butane" for information about Butane. [source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 99-master-chrony labels: diff --git a/modules/machineconfig-modify-journald.adoc b/modules/machineconfig-modify-journald.adoc index f4426fdf02ce..47e297bba603 100644 --- a/modules/machineconfig-modify-journald.adoc +++ b/modules/machineconfig-modify-journald.adoc @@ -26,7 +26,7 @@ See "Creating machine configs with Butane" for information about Butane. [source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 40-worker-custom-journald labels: diff --git a/modules/manually-maintained-credentials-upgrade.adoc b/modules/manually-maintained-credentials-upgrade.adoc index 8535e7922c7d..b7467dbe9e7b 100644 --- a/modules/manually-maintained-credentials-upgrade.adoc +++ b/modules/manually-maintained-credentials-upgrade.adoc @@ -13,7 +13,7 @@ The Cloud Credential Operator (CCO) `Upgradable` status for a cluster with manua * For minor releases, for example, from 4.8 to 4.9, this status prevents you from upgrading until you have addressed any updated permissions and annotated the `CloudCredential` resource to indicate that the permissions are updated as needed for the next version. This annotation changes the `Upgradable` status to `True`. -* For z-stream releases, for example, from 4.8.9 to 4.8.10, no permissions are added or changed, so the upgrade is not blocked. +* For z-stream releases, for example, from 4.9.0 to 4.9.1, no permissions are added or changed, so the upgrade is not blocked. Before upgrading a cluster with manually maintained credentials, you must create any new credentials for the release image that you are upgrading to. 
Additionally, you must review the required permissions for existing credentials and accommodate any new permissions requirements in the new release for those components. diff --git a/modules/nodes-cluster-resource-override-deploy-cli.adoc b/modules/nodes-cluster-resource-override-deploy-cli.adoc index ec29a4ac6bab..0c49b1244304 100644 --- a/modules/nodes-cluster-resource-override-deploy-cli.adoc +++ b/modules/nodes-cluster-resource-override-deploy-cli.adoc @@ -84,7 +84,7 @@ metadata: name: clusterresourceoverride namespace: clusterresourceoverride-operator spec: - channel: "4.8" + channel: "4.9" name: clusterresourceoverride source: redhat-operators sourceNamespace: openshift-marketplace diff --git a/modules/nodes-nodes-viewing-listing.adoc b/modules/nodes-nodes-viewing-listing.adoc index adf3655e074a..d2c205c3d67b 100644 --- a/modules/nodes-nodes-viewing-listing.adoc +++ b/modules/nodes-nodes-viewing-listing.adoc @@ -59,9 +59,9 @@ $ oc get nodes -o wide [source,terminal] ---- NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME -master.example.com Ready master 171m v1.22.1 10.0.129.108 Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.18.0-240.15.1.el8_3.x86_64 cri-o://1.22.1-30.rhaos4.8.gitf2f339d.el8-dev -node1.example.com Ready worker 72m v1.22.1 10.0.129.222 Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.18.0-240.15.1.el8_3.x86_64 cri-o://1.22.1-30.rhaos4.8.gitf2f339d.el8-dev -node2.example.com Ready worker 164m v1.22.1 10.0.142.150 Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.18.0-240.15.1.el8_3.x86_64 cri-o://1.22.1-30.rhaos4.8.gitf2f339d.el8-dev +master.example.com Ready master 171m v1.22.1 10.0.129.108 Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.18.0-240.15.1.el8_3.x86_64 cri-o://1.22.1-30.rhaos4.9.gitf2f339d.el8-dev +node1.example.com Ready worker 72m v1.22.1 10.0.129.222 Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.18.0-240.15.1.el8_3.x86_64 cri-o://1.22.1-30.rhaos4.9.gitf2f339d.el8-dev +node2.example.com Ready worker 164m v1.22.1 10.0.142.150 Red Hat Enterprise Linux CoreOS 48.83.202103210901-0 (Ootpa) 4.18.0-240.15.1.el8_3.x86_64 cri-o://1.22.1-30.rhaos4.9.gitf2f339d.el8-dev ---- * The following command lists information about a single node: diff --git a/modules/nw-enabling-a-provisioning-network-after-installation.adoc b/modules/nw-enabling-a-provisioning-network-after-installation.adoc index 436912ac57d6..29374f83e278 100644 --- a/modules/nw-enabling-a-provisioning-network-after-installation.adoc +++ b/modules/nw-enabling-a-provisioning-network-after-installation.adoc @@ -8,7 +8,7 @@ The assisted installer and installer-provisioned installation for bare metal clusters provide the ability to deploy a cluster without a `provisioning` network. This capability is for scenarios such as proof-of-concept clusters or deploying exclusively with Redfish virtual media when each node's baseboard management controller is routable via the `baremetal` network. -In {product-title} 4.8 and later, you can enable a `provisioning` network after installation using the Cluster Baremetal Operator (CBO). +You can enable a `provisioning` network after installation using the Cluster Baremetal Operator (CBO). 
.Prerequisites diff --git a/modules/nw-ingress-converting-http-header-case.adoc b/modules/nw-ingress-converting-http-header-case.adoc index 27f0b6ef0ece..24b235a14243 100644 --- a/modules/nw-ingress-converting-http-header-case.adoc +++ b/modules/nw-ingress-converting-http-header-case.adoc @@ -9,7 +9,7 @@ HAProxy 2.2 lowercases HTTP header names by default, for example, changing `Host [IMPORTANT] ==== -Because {product-title} 4.8 includes HAProxy 2.2, make sure to add the necessary configuration by using `spec.httpHeaders.headerNameCaseAdjustments` before upgrading. +Because {product-title} 4.9 includes HAProxy 2.2, make sure to add the necessary configuration by using `spec.httpHeaders.headerNameCaseAdjustments` before upgrading. ==== .Prerequisites diff --git a/modules/nw-sriov-installing-operator.adoc b/modules/nw-sriov-installing-operator.adoc index ab6c718e66d3..07b2394e3636 100644 --- a/modules/nw-sriov-installing-operator.adoc +++ b/modules/nw-sriov-installing-operator.adoc @@ -91,7 +91,7 @@ $ oc get csv -n openshift-sriov-network-operator \ [source,terminal] ---- Name Phase -sriov-network-operator.4.4.0-202006160135 Succeeded +sriov-network-operator.4.9.0-202110121402 Succeeded ---- [id="install-operator-web-console_{context}"] diff --git a/modules/oauth-configuring-token-inactivity-timeout.adoc b/modules/oauth-configuring-token-inactivity-timeout.adoc index 89408308abb6..5501ab35a81e 100644 --- a/modules/oauth-configuring-token-inactivity-timeout.adoc +++ b/modules/oauth-configuring-token-inactivity-timeout.adoc @@ -57,7 +57,7 @@ Do not continue to the next step until `PROGRESSING` is listed as `False`, as sh [source,terminal] ---- NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -authentication 4.8.0 True False False 145m +authentication 4.9.0 True False False 145m ---- . Check that a new revision of the Kubernetes API server pods has rolled out. This will take several minutes. @@ -73,7 +73,7 @@ Do not continue to the next step until `PROGRESSING` is listed as `False`, as sh [source,terminal] ---- NAME VERSION AVAILABLE PROGRESSING DEGRADED SINCE -kube-apiserver 4.8.0 True False False 145m +kube-apiserver 4.9.0 True False False 145m ---- + If `PROGRESSING` is showing `True`, wait a few minutes and try again. diff --git a/modules/olm-about-catalogs.adoc b/modules/olm-about-catalogs.adoc index 278565a295e0..014b25b08714 100644 --- a/modules/olm-about-catalogs.adoc +++ b/modules/olm-about-catalogs.adoc @@ -26,3 +26,4 @@ Support for the legacy _package manifest format_ for Operators, including custom When creating custom catalog images, previous versions of {product-title} 4 required using the `oc adm catalog build` command, which was deprecated for several releases and is now removed. With the availability of Red Hat-provided index images starting in {product-title} 4.6, catalog builders must use the `opm index` command to manage index images. ==== +//Check on pulling this note during the 4.10 to 4.11 version scrub. diff --git a/modules/op-release-notes-1-5.adoc b/modules/op-release-notes-1-5.adoc index 638e83b6afae..37080d0b4ced 100644 --- a/modules/op-release-notes-1-5.adoc +++ b/modules/op-release-notes-1-5.adoc @@ -5,7 +5,7 @@ [id="op-release-notes-1-5_{context}"] = Release notes for {pipelines-title} General Availability 1.5 -{pipelines-title} General Availability (GA) 1.5 is now available on {product-title} 4.8. +{pipelines-title} General Availability (GA) 1.5 is now available on {product-title} 4.9. 
[id="compatibility-support-matrix-1-5_{context}"] == Compatibility and support matrix diff --git a/modules/osdk-control-compat.adoc b/modules/osdk-control-compat.adoc index 15efc1f40be4..05466106a168 100644 --- a/modules/osdk-control-compat.adoc +++ b/modules/osdk-control-compat.adoc @@ -16,7 +16,7 @@ When an API is removed from an {product-title} version, Operators running on tha [TIP] ==== -You can check the event alerts of your Operators running on {product-title} 4.8 and later to find whether there are any warnings about APIs currently in use. The following alerts fire when they detect an API in use that will be removed in the next release: +You can check the event alerts of your Operators to find whether there are any warnings about APIs currently in use. The following alerts fire when they detect an API in use that will be removed in the next release: `APIRemovedInNextReleaseInUse`:: APIs that will be removed in the next {product-title} release. diff --git a/modules/psap-driver-toolkit-pulling.adoc b/modules/psap-driver-toolkit-pulling.adoc index 2f6d4886d6a9..d13323250016 100644 --- a/modules/psap-driver-toolkit-pulling.adoc +++ b/modules/psap-driver-toolkit-pulling.adoc @@ -11,7 +11,7 @@ The `driver-toolkit` image is available from the link:https://registry.redhat.io == Pulling the Driver Toolkit container image from registry.redhat.io Instructions for pulling the `driver-toolkit` image from `registry.redhat.io` with podman or in {product-title} can be found on the link:https://catalog.redhat.com/software/containers/openshift4/driver-toolkit-rhel8/604009d6122bd89307e00865?container-tabs=gti[Red Hat Ecosystem Catalog]. -The driver-toolkit image for the latest minor release will be tagged with the minor release version on registry.redhat.io for example `registry.redhat.io/openshift4/driver-toolkit-rhel8:v4.8`. +The driver-toolkit image for the latest minor release will be tagged with the minor release version on registry.redhat.io for example `registry.redhat.io/openshift4/driver-toolkit-rhel8:v4.9`. [id="pulling-the-driver-toolkit-from-payload"] == Finding the Driver Toolkit image URL in the payload @@ -27,7 +27,7 @@ The driver-toolkit image for the latest minor release will be tagged with the mi + [source,terminal] ---- -$ oc adm release info 4.8.0 --image-for=driver-toolkit +$ oc adm release info 4.9.0 --image-for=driver-toolkit ---- + .Example output diff --git a/modules/psap-driver-toolkit-using.adoc b/modules/psap-driver-toolkit-using.adoc index 53006cecad45..6b11f8f0f0d6 100644 --- a/modules/psap-driver-toolkit-using.adoc +++ b/modules/psap-driver-toolkit-using.adoc @@ -105,7 +105,7 @@ spec: name: simple-kmod-driver-container:demo ---- -. Substitute the correct driver toolkit image for the {product-title} version you are running in place of “DRIVER_TOOLKIT_IMAGE” with the following commands. +. Substitute the correct driver toolkit image for the {product-title} version you are running in place of “DRIVER_TOOLKIT_IMAGE” with the following commands. + [source,terminal] ---- @@ -121,11 +121,6 @@ $ DRIVER_TOOLKIT_IMAGE=$(oc adm release info $OCP_VERSION --image-for=driver-too ---- $ sed "s#DRIVER_TOOLKIT_IMAGE#${DRIVER_TOOLKIT_IMAGE}#" 0000-buildconfig.yaml.template > 0000-buildconfig.yaml ---- -+ -[NOTE] -==== -The driver toolkit was introduced to {product-title} 4.6 as of version 4.6.30, in 4.7 as of version 4.7.11, and in 4.8. -==== . 
Create the image stream and build config with + @@ -134,7 +129,7 @@ The driver toolkit was introduced to {product-title} 4.6 as of version 4.6.30, i $ oc create -f 0000-buildconfig.yaml ---- -. After the builder pod completes successfully, deploy the driver container image as a `DaemonSet`. +. After the builder pod completes successfully, deploy the driver container image as a `DaemonSet`. .. The driver container must run with the privileged security context in order to load the kernel modules on the host. The following YAML file contains the RBAC rules and the `DaemonSet` for running the driver container. Save this YAML as `1000-drivercontainer.yaml`. + @@ -210,7 +205,7 @@ spec: $ oc create -f 1000-drivercontainer.yaml ---- -. After the pods are running on the worker nodes, verify that the `simple_kmod` kernel module is loaded successfully on the host machines with `lsmod`. +. After the pods are running on the worker nodes, verify that the `simple_kmod` kernel module is loaded successfully on the host machines with `lsmod`. .. Verify that the pods are running: + diff --git a/modules/psap-installing-node-feature-discovery-operator.adoc b/modules/psap-installing-node-feature-discovery-operator.adoc index 840ba01630f5..faec004a54ce 100644 --- a/modules/psap-installing-node-feature-discovery-operator.adoc +++ b/modules/psap-installing-node-feature-discovery-operator.adoc @@ -73,7 +73,7 @@ $ oc get packagemanifest nfd -n openshift-marketplace -o jsonpath='{.status.defa .Example output [source,terminal] ---- -4.8 +4.9 ---- .. Create the following `Subscription` CR and save the YAML in the `nfd-sub.yaml` file: @@ -87,7 +87,7 @@ metadata: name: nfd namespace: openshift-nfd spec: - channel: "4.8" + channel: "4.9" installPlanApproval: Automatic name: nfd source: redhat-operators diff --git a/modules/psap-using-node-feature-discovery-operator.adoc b/modules/psap-using-node-feature-discovery-operator.adoc index 7229474ec08b..7524a195c883 100644 --- a/modules/psap-using-node-feature-discovery-operator.adoc +++ b/modules/psap-using-node-feature-discovery-operator.adoc @@ -36,7 +36,7 @@ spec: instance: "" # instance is empty by default operand: namespace: openshift-nfd - image: quay.io/openshift/origin-node-feature-discovery:4.8 + image: quay.io/openshift/origin-node-feature-discovery:4.9 imagePullPolicy: Always workerConfig: configData: | diff --git a/modules/rhel-compute-updating.adoc b/modules/rhel-compute-updating.adoc index e394f38f9375..acc028817756 100644 --- a/modules/rhel-compute-updating.adoc +++ b/modules/rhel-compute-updating.adoc @@ -45,7 +45,7 @@ Because the {op-system-base} machines require assets that are generated by the c + [NOTE] ==== -By default, the base OS RHEL with "Minimal" installation option enables firewalld serivce. Having the firewalld service enabled on your host prevents you from accessing {product-title} logs on the worker. Do not enable firewalld later if you wish to continue accessing {product-title} logs on the worker. +By default, the base OS RHEL with "Minimal" installation option enables firewalld serivce. Having the firewalld service enabled on your host prevents you from accessing {product-title} logs on the worker. Do not enable firewalld later if you wish to continue accessing {product-title} logs on the worker. ==== . 
Enable the repositories that are required for {product-title} {product-version}: diff --git a/modules/running-cluster-loader.adoc b/modules/running-cluster-loader.adoc index 6425872e81cd..1a4e157e476a 100644 --- a/modules/running-cluster-loader.adoc +++ b/modules/running-cluster-loader.adoc @@ -19,7 +19,7 @@ template builds and waits for them to complete: [source,terminal] ---- $ podman run -v ${LOCAL_KUBECONFIG}:/root/.kube/config:z -i \ -quay.io/openshift/origin-tests:4.8 /bin/bash -c 'export KUBECONFIG=/root/.kube/config && \ +quay.io/openshift/origin-tests:4.9 /bin/bash -c 'export KUBECONFIG=/root/.kube/config && \ openshift-tests run-test "[sig-scalability][Feature:Performance] Load cluster \ should populate the cluster [Slow][Serial] [Suite:openshift]"' ---- @@ -31,7 +31,7 @@ setting the environment variable for `VIPERCONFIG`: ---- $ podman run -v ${LOCAL_KUBECONFIG}:/root/.kube/config:z \ -v ${LOCAL_CONFIG_FILE_PATH}:/root/configs/:z \ --i quay.io/openshift/origin-tests:4.8 \ +-i quay.io/openshift/origin-tests:4.9 \ /bin/bash -c 'KUBECONFIG=/root/.kube/config VIPERCONFIG=/root/configs/test.yaml \ openshift-tests run-test "[sig-scalability][Feature:Performance] Load cluster \ should populate the cluster [Slow][Serial] [Suite:openshift]"' diff --git a/modules/understanding-upgrade-channels.adoc b/modules/understanding-upgrade-channels.adoc index 4d0792f9b7e8..bd36e7ded14e 100644 --- a/modules/understanding-upgrade-channels.adoc +++ b/modules/understanding-upgrade-channels.adoc @@ -9,7 +9,7 @@ [id="understanding-upgrade-channels_{context}"] = {product-title} upgrade channels and releases -In {product-title} 4.1, Red Hat introduced the concept of channels for recommending the appropriate release versions for cluster upgrades. By controlling the pace of upgrades, these upgrade channels allow you to choose an upgrade strategy. Upgrade channels are tied to a minor version of {product-title}. For instance, {product-title} 4.8 upgrade channels recommend upgrades to 4.8 and upgrades within 4.8. They also recommend upgrades within 4.7 and from 4.7 to 4.8, to allow clusters on 4.7 to eventually upgrade to 4.8. They do not recommend upgrades to 4.9 or later releases. This strategy ensures that administrators explicitly decide to upgrade to the next minor version of {product-title}. +In {product-title} 4.1, Red Hat introduced the concept of channels for recommending the appropriate release versions for cluster upgrades. By controlling the pace of upgrades, these upgrade channels allow you to choose an upgrade strategy. Upgrade channels are tied to a minor version of {product-title}. For instance, {product-title} 4.9 upgrade channels recommend upgrades to 4.9 and upgrades within 4.9. They also recommend upgrades within 4.8 and from 4.8 to 4.9, to allow clusters on 4.8 to eventually upgrade to 4.9. They do not recommend upgrades to 4.10 or later releases. This strategy ensures that administrators explicitly decide to upgrade to the next minor version of {product-title}. Upgrade channels control only release selection and do not impact the version of the cluster that you install; the `openshift-install` binary file for a specific version of {product-title} always installs that version. 
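To confirm which channel a cluster currently follows before you change it, you can read the `ClusterVersion` resource directly. The following is a quick check using standard `oc` output filtering; an empty result means that no channel is set:

[source,terminal]
----
$ oc get clusterversion version -o jsonpath='{.spec.channel}{"\n"}'
----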
@@ -140,7 +140,7 @@ A channel can be switched from the web console or through the `adm upgrade chann $ oc adm upgrade channel clusterversion version --type json -p '[{"op": "add", "path": "/spec/channel", "value": "”}]' ---- -The web console will display an alert if you switch to a channel that does not include the current release. The web console does not recommend any updates while on a channel without the current release. You can return to the original channel at any point, however. +The web console will display an alert if you switch to a channel that does not include the current release. The web console does not recommend any updates while on a channel without the current release. You can return to the original channel at any point, however. Changing your channel might impact the supportability of your cluster. The following conditions might apply: diff --git a/modules/update-oc-configmap-signature-verification.adoc b/modules/update-oc-configmap-signature-verification.adoc index a1f597e7e7d7..09c54714e3c4 100644 --- a/modules/update-oc-configmap-signature-verification.adoc +++ b/modules/update-oc-configmap-signature-verification.adoc @@ -7,14 +7,9 @@ Before you update your cluster, you must manually create a config map that contains the signatures of the release images that you use. This signature allows the Cluster Version Operator (CVO) to verify that the release images have not been modified by comparing the expected and actual image signatures. -[NOTE] -==== -If you are upgrading from a release prior to version 4.4.8, you must use the manual method for creating the config map instead of this procedure. The commands that this procedure uses are not in earlier versions of the `oc` command-line interface (CLI). -==== - .Prerequisites -* Install the OpenShift CLI (`oc`), version 4.4.8 or later. +* Install the OpenShift CLI (`oc`). .Procedure diff --git a/modules/update-preparing-evaluate-alerts.adoc b/modules/update-preparing-evaluate-alerts.adoc index 91a1cddbf5b8..c3cdc5afde2e 100644 --- a/modules/update-preparing-evaluate-alerts.adoc +++ b/modules/update-preparing-evaluate-alerts.adoc @@ -5,7 +5,7 @@ [id="update-preparing-evaluate-alerts_{context}"] = Reviewing alerts to identify uses of removed APIs -{product-title} 4.8 introduced two new alerts that fire when an API is in use that will be removed in the next release: +Two alerts fire when an API is in use that will be removed in the next release: * `APIRemovedInNextReleaseInUse` - for APIs that will be removed in the next {product-title} release. * `APIRemovedInNextEUSReleaseInUse` - for APIs that will be removed in the next {product-title} Extended Update Support (EUS) release. diff --git a/modules/update-restricted.adoc b/modules/update-restricted.adoc index 923445d4a81a..4c9c4eb6bdf9 100644 --- a/modules/update-restricted.adoc +++ b/modules/update-restricted.adoc @@ -19,7 +19,7 @@ If you have a local OpenShift Update Service, you can update by using the connec * You mirrored the images for the new release to your registry. * You applied the release image signature ConfigMap for the new release to your cluster. * You obtained the sha256 sum value for the release from the image signature ConfigMap. -* Install the OpenShift CLI (`oc`), version 4.4.8 or later. +* Install the OpenShift CLI (`oc`). * Pause all `MachineHealthCheck` resources. 
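For the last prerequisite, one possible way to pause a `MachineHealthCheck` resource is to annotate it. The following sketch uses `<mhc_name>` as a placeholder for your resource name; confirm the annotation key against the documentation for your cluster version:

[source,terminal]
----
$ oc -n openshift-machine-api annotate machinehealthcheck <mhc_name> cluster.x-k8s.io/paused=""
----

Removing the annotation after the update resumes the health checks.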
.Procedure diff --git a/modules/update-upgrading-cli.adoc b/modules/update-upgrading-cli.adoc index fd68d0eb3ffa..049da9566861 100644 --- a/modules/update-upgrading-cli.adoc +++ b/modules/update-upgrading-cli.adoc @@ -33,11 +33,11 @@ $ oc get clusterversion [source,terminal] ---- NAME VERSION AVAILABLE PROGRESSING SINCE STATUS -version 4.6.9 True False 158m Cluster version is 4.6.9 +version 4.8.13 True False 158m Cluster version is 4.8.13 ---- . Review the current update channel information and confirm that your channel -is set to `stable-4.8`: +is set to `stable-4.9`: + [source,terminal] ---- @@ -48,9 +48,8 @@ $ oc get clusterversion -o json|jq ".items[0].spec" [source,terminal] ---- { - "channel": "stable-4.8", + "channel": "stable-4.9", "clusterID": "990f7ab8-109b-4c95-8480-2bd1deec55ff", - "upstream": "https://api.openshift.com/api/upgrades_info/v1/graph" } ---- + @@ -70,12 +69,12 @@ $ oc adm upgrade .Example output [source,terminal] ---- -Cluster version is 4.1.0 +Cluster version is 4.8.13 Updates: VERSION IMAGE -4.1.2 quay.io/openshift-release-dev/ocp-release@sha256:9c5f0df8b192a0d7b46cd5f6a4da2289c155fd5302dec7954f8f06c878160b8b +4.9.0 quay.io/openshift-release-dev/ocp-release@sha256:9c5f0df8b192a0d7b46cd5f6a4da2289c155fd5302dec7954f8f06c878160b8b ---- . Apply an update: @@ -106,14 +105,12 @@ $ oc get clusterversion -o json|jq ".items[0].spec" [source,terminal] ---- { - "channel": "stable-4.8", + "channel": "stable-4.9", "clusterID": "990f7ab8-109b-4c95-8480-2bd1deec55ff", "desiredUpdate": { "force": false, "image": "quay.io/openshift-release-dev/ocp-release@sha256:9c5f0df8b192a0d7b46cd5f6a4da2289c155fd5302dec7954f8f06c878160b8b", - "version": "4.8.0" <1> - }, - "upstream": "https://api.openshift.com/api/upgrades_info/v1/graph" + "version": "4.9.0" <1> } ---- <1> If the `version` number in the `desiredUpdate` stanza matches the value that @@ -137,7 +134,7 @@ $ oc get clusterversion -o json|jq ".items[0].status.history" "startedTime": "2021-01-28T20:30:50Z", "state": "Partial", "verified": true, - "version": "4.8.0" + "version": "4.9.0" }, { "completionTime": "2021-01-28T20:30:50Z", @@ -145,7 +142,7 @@ $ oc get clusterversion -o json|jq ".items[0].status.history" "startedTime": "2021-01-28T17:38:10Z", "state": "Completed", "verified": false, - "version": "4.8.0" + "version": "4.8.13" } ] ---- @@ -175,7 +172,7 @@ $ oc get clusterversion [source,terminal] ---- NAME VERSION AVAILABLE PROGRESSING SINCE STATUS -version 4.8.0 True False 2m Cluster version is 4.8.0 +version 4.9.0 True False 2m Cluster version is 4.9.0 ---- . If you are upgrading your cluster to the next minor version, like version 4.y to 4.(y+1), it is recommended to confirm your nodes are upgraded before deploying workloads that rely on a new feature: diff --git a/modules/viewing-the-image-pull-source.adoc b/modules/viewing-the-image-pull-source.adoc index 5ae97edfe6b8..d52e94deb01b 100644 --- a/modules/viewing-the-image-pull-source.adoc +++ b/modules/viewing-the-image-pull-source.adoc @@ -7,7 +7,7 @@ For clusters with unrestricted network connectivity, you can view the source of your pulled images by using a command on a node, such as `crictl images`. -However, for disconnected installations, to view the source of pulled images, you must review the CRI-O logs to locate the `Trying to access` log entry, as shown in the following procedure. Other methods to view the image pull source, such as the `crictl images` command, show the non-mirrored image name, even though the image is pulled from the mirrored location. 
+However, for disconnected installations, to view the source of pulled images, you must review the CRI-O logs to locate the `Trying to access` log entry, as shown in the following procedure. Other methods to view the image pull source, such as the `crictl images` command, show the non-mirrored image name, even though the image is pulled from the mirrored location. .Prerequisites @@ -29,13 +29,13 @@ The `Trying to access` log entry indicates where the image is being pulled from. [source,terminal] ---- ... -Mar 17 02:52:50 ip-10-0-138-140.ec2.internal crio[1366]: time="2021-08-05 10:33:21.594930907Z" level=info msg="Pulling image: quay.io/openshift-release-dev/ocp-release:4.8.4-ppc64le" id=abcd713b-d0e1-4844-ac1c-474c5b60c07c name=/runtime.v1alpha2.ImageService/PullImage +Mar 17 02:52:50 ip-10-0-138-140.ec2.internal crio[1366]: time="2021-08-05 10:33:21.594930907Z" level=info msg="Pulling image: quay.io/openshift-release-dev/ocp-release:4.9.0-ppc64le" id=abcd713b-d0e1-4844-ac1c-474c5b60c07c name=/runtime.v1alpha2.ImageService/PullImage Mar 17 02:52:50 ip-10-0-138-140.ec2.internal crio[1484]: time="2021-03-17 02:52:50.194341109Z" level=info msg="Trying to access \"li0317gcp1.mirror-registry.qe.gcp.devcluster.openshift.com:5000/ocp/release@sha256:1926eae7cacb9c00f142ec98b00628970e974284b6ddaf9a6a086cb9af7a6c31\"" Mar 17 02:52:50 ip-10-0-138-140.ec2.internal crio[1484]: time="2021-03-17 02:52:50.226788351Z" level=info msg="Trying to access \"li0317gcp1.mirror-registry.qe.gcp.devcluster.openshift.com:5000/ocp/release@sha256:1926eae7cacb9c00f142ec98b00628970e974284b6ddaf9a6a086cb9af7a6c31\"" ... ---- + -The log might show the image pull source twice, as shown in the preceding example. +The log might show the image pull source twice, as shown in the preceding example. + If your `ImageContentSourcePolicy` object lists multiple mirrors, {product-title} attempts to pull the images in the order listed in the configuration, for example: + @@ -43,4 +43,3 @@ If your `ImageContentSourcePolicy` object lists multiple mirrors, {product-title Trying to access \"li0317gcp1.mirror-registry.qe.gcp.devcluster.openshift.com:5000/ocp/release@sha256:1926eae7cacb9c00f142ec98b00628970e974284b6ddaf9a6a086cb9af7a6c31\" Trying to access \"li0317gcp2.mirror-registry.qe.gcp.devcluster.openshift.com:5000/ocp/release@sha256:1926eae7cacb9c00f142ec98b00628970e974284b6ddaf9a6a086cb9af7a6c31\" ---- - diff --git a/modules/virt-binding-devices-vfio-driver.adoc b/modules/virt-binding-devices-vfio-driver.adoc index d3404261e821..ca583f3f6546 100644 --- a/modules/virt-binding-devices-vfio-driver.adoc +++ b/modules/virt-binding-devices-vfio-driver.adoc @@ -34,7 +34,7 @@ See "Creating machine configs with Butane" for information about Butane. 
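For orientation, after you save the Butane config that follows, for example as `100-worker-vfiopci.bu`, a typical conversion and apply sequence looks like the following sketch. The file names are assumptions based on the machine config name used in this module:

[source,terminal]
----
$ butane 100-worker-vfiopci.bu -o 100-worker-vfiopci.yaml

$ oc create -f 100-worker-vfiopci.yaml
----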
[source,yaml] ---- variant: openshift -version: 4.8.0 +version: 4.9.0 metadata: name: 100-worker-vfiopci labels: diff --git a/modules/virt-enabling-virt-repos.adoc b/modules/virt-enabling-virt-repos.adoc index a63b803817a9..9762ee338a03 100644 --- a/modules/virt-enabling-virt-repos.adoc +++ b/modules/virt-enabling-virt-repos.adoc @@ -8,9 +8,9 @@ Red Hat offers {VirtProductName} repositories for both Red Hat Enterprise Linux 8 and Red Hat Enterprise Linux 7: -* Red Hat Enterprise Linux 8 repository: `cnv-4.8-for-rhel-8-x86_64-rpms` +* Red Hat Enterprise Linux 8 repository: `cnv-4.9-for-rhel-8-x86_64-rpms` -* Red Hat Enterprise Linux 7 repository: `rhel-7-server-cnv-4.8-rpms` +* Red Hat Enterprise Linux 7 repository: `rhel-7-server-cnv-4.9-rpms` The process for enabling the repository in `subscription-manager` is the same in both platforms. diff --git a/modules/ztp-acm-adding-images-to-mirror-registry.adoc b/modules/ztp-acm-adding-images-to-mirror-registry.adoc index f3ce00ab1885..ba810f61257e 100644 --- a/modules/ztp-acm-adding-images-to-mirror-registry.adoc +++ b/modules/ztp-acm-adding-images-to-mirror-registry.adoc @@ -37,9 +37,9 @@ $ export ROOTFS_IMAGE_NAME= <2> ---- $ export OCP_VERSION= <3> ---- -<1> ISO image name, for example, `rhcos-4.8.0-fc.9-x86_64-live.x86_64.iso` -<2> RootFS image name, for example, `rhcos-4.8.0-fc.9-x86_64-live-rootfs.x86_64.img` -<3> {product-title} version, for example, `latest-4.8` +<1> ISO image name, for example, `rhcos-4.9.0-fc.1-x86_64-live.x86_64.iso` +<2> RootFS image name, for example, `rhcos-4.9.0-fc.1-x86_64-live-rootfs.x86_64.img` +<3> {product-title} version, for example, `latest-4.9` .. Download the required images: + @@ -67,7 +67,7 @@ $ wget http://$(hostname)/${ISO_IMAGE_NAME} [source,terminal] ---- ... -Saving to: rhcos-4.8.0-fc.9-x86_64-live.x86_64.iso -rhcos-4.8.0-fc.8-x86_64- 11%[====> ] 10.01M 4.71MB/s +Saving to: rhcos-4.9.0-fc.1-x86_64-live.x86_64.iso +rhcos-4.9.0-fc.1-x86_64- 11%[====> ] 10.01M 4.71MB/s ... ---- diff --git a/modules/ztp-acm-installing-disconnected-rhacm.adoc b/modules/ztp-acm-installing-disconnected-rhacm.adoc index 7ad0c001b347..6e2b3b340c43 100644 --- a/modules/ztp-acm-installing-disconnected-rhacm.adoc +++ b/modules/ztp-acm-installing-disconnected-rhacm.adoc @@ -16,7 +16,7 @@ You use {rh-rhacm-first} on a hub cluster in the disconnected environment to man [NOTE] ==== If you want to deploy Operators to the spoke clusters, you must also add them to this registry. -See link:https://docs.openshift.com/container-platform/4.8/operators/admin/olm-restricted-networks.html#olm-mirror-catalog_olm-restricted-networks[Mirroring an Operator catalog] for more information. +See link:https://docs.openshift.com/container-platform/4.9/operators/admin/olm-restricted-networks.html#olm-mirror-catalog_olm-restricted-networks[Mirroring an Operator catalog] for more information. 
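As a rough sketch of that catalog mirroring step, where the index image tag, the mirror registry host and port, and the pull secret path are placeholders that you adapt to your environment:

[source,terminal]
----
$ oc adm catalog mirror \
    registry.redhat.io/redhat/redhat-operator-index:v4.9 \
    <mirror_registry>:<port> \
    -a <registry_pull_secret_file>
----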
==== .Procedure diff --git a/modules/ztp-creating-the-siteconfig-custom-resources.adoc b/modules/ztp-creating-the-siteconfig-custom-resources.adoc index a0e05e162455..909fa1d55425 100644 --- a/modules/ztp-creating-the-siteconfig-custom-resources.adoc +++ b/modules/ztp-creating-the-siteconfig-custom-resources.adoc @@ -23,7 +23,7 @@ spec: baseDomain: "clus2.t5g.lab.eng.bos.redhat.com" pullSecretRef: name: "assisted-deployment-pull-secret" - clusterImageSetNameRef: "openshift-4.8" + clusterImageSetNameRef: "openshift-4.9" sshPublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDB3dwhI5X0ZxGBb9VK7wclcPHLc8n7WAyKjTNInFjYNP9J+Zoc/ii+l3YbGUTuqilDwZN5rVIwBux2nUyVXDfaM5kPd9kACmxWtfEWTyVRootbrNWwRfKuC2h6cOd1IlcRBM1q6IzJ4d7+JVoltAxsabqLoCbK3svxaZoKAaK7jdGG030yvJzZaNM4PiTy39VQXXkCiMDmicxEBwZx1UsA8yWQsiOQ5brod9KQRXWAAST779gbvtgXR2L+MnVNROEHf1nEjZJwjwaHxoDQYHYKERxKRHlWFtmy5dNT6BbvOpJ2e5osDFPMEd41d2mUJTfxXiC1nvyjk9Irf8YJYnqJgBIxi0IxEllUKH7mTdKykHiPrDH5D2pRlp+Donl4n+sw6qoDc/3571O93+RQ6kUSAgAsvWiXrEfB/7kGgAa/BD5FeipkFrbSEpKPVu+gue1AQeJcz9BuLqdyPUQj2VUySkSg0FuGbG7fxkKeF1h3Sga7nuDOzRxck4I/8Z7FxMF/e8DmaBpgHAUIfxXnRqAImY9TyAZUEMT5ZPSvBRZNNmLbfex1n3NLcov/GEpQOqEYcjG5y57gJ60/av4oqjcVmgtaSOOAS0kZ3y9YDhjsaOcpmRYYijJn8URAH7NrW8EZsvAoF6GUt6xHq5T258c6xSYUm5L0iKvBqrOW9EjbLw== root@cnfdc2.clus2.t5g.lab.eng.bos.redhat.com" clusters: - clusterName: "test-sno" diff --git a/operators/operator_sdk/osdk-pkgman-to-bundle.adoc b/operators/operator_sdk/osdk-pkgman-to-bundle.adoc index 32f4b12694a8..5163d3cad8bf 100644 --- a/operators/operator_sdk/osdk-pkgman-to-bundle.adoc +++ b/operators/operator_sdk/osdk-pkgman-to-bundle.adoc @@ -6,6 +6,7 @@ include::modules/common-attributes.adoc[] toc::[] Support for the legacy _package manifest format_ for Operators is removed in {product-title} 4.8 and later. If you have an Operator project that was initially created using the package manifest format, you can use the Operator SDK to migrate the project to the bundle format. The bundle format is the preferred packaging format for Operator Lifecycle Manager (OLM) starting in {product-title} 4.6. +//Consider updating this during the 4.10 to 4.11 version scrub. include::modules/osdk-about-pkg-format-migration.adoc[leveloffset=+1] .Additional resources diff --git a/operators/operator_sdk/osdk-upgrading-projects.adoc b/operators/operator_sdk/osdk-upgrading-projects.adoc index ee7282fd88d1..fa8aab345725 100644 --- a/operators/operator_sdk/osdk-upgrading-projects.adoc +++ b/operators/operator_sdk/osdk-upgrading-projects.adoc @@ -25,6 +25,7 @@ include::modules/osdk-upgrading-v180-to-v1101.adoc[leveloffset=+1] * xref:../../operators/operator_sdk/osdk-pkgman-to-bundle.adoc#osdk-pkgman-to-bundle[Migrating package manifest projects to bundle format] * link:https://docs.openshift.com/container-platform/4.8/operators/operator_sdk/osdk-upgrading-projects.html#osdk-upgrading-v130-to-v180_osdk-upgrading-projects[Upgrading projects for Operator SDK v1.8.0] +//Consider updating this during the 4.10 to 4.11 version scrub. :!osdk_ver: :!osdk_ver_n1: diff --git a/operators/understanding/olm-packaging-format.adoc b/operators/understanding/olm-packaging-format.adoc index c32c89cf3478..07c0918dc3a5 100644 --- a/operators/understanding/olm-packaging-format.adoc +++ b/operators/understanding/olm-packaging-format.adoc @@ -11,6 +11,7 @@ This guide outlines the packaging format for Operators supported by Operator Lif ==== Support for the legacy _package manifest format_ for Operators is removed in {product-title} 4.8 and later. 
Existing Operator projects in the package manifest format can be migrated to the bundle format by using the Operator SDK `pkgman-to-bundle` command. See xref:../../operators/operator_sdk/osdk-pkgman-to-bundle.adoc#osdk-pkgman-to-bundle[Migrating package manifest projects to bundle format] for more details. ==== +//Consider updating this during the 4.10 to 4.11 version scrub. include::modules/olm-bundle-format.adoc[leveloffset=+1] include::modules/olm-bundle-format-dependencies-file.adoc[leveloffset=+2] diff --git a/welcome/index.adoc b/welcome/index.adoc index 047d0dce61d0..5d2b8d8db8fa 100644 --- a/welcome/index.adoc +++ b/welcome/index.adoc @@ -16,7 +16,7 @@ Start with xref:../architecture/architecture.adoc#architecture-overview-architec xref:../security/container_security/security-understanding.adoc#understanding-security[Security and compliance]. ifdef::openshift-enterprise,openshift-webscale[] Then, see the -xref:../release_notes/ocp-4-8-release-notes.adoc#ocp-4-8-release-notes[release notes]. +xref:../release_notes/ocp-4-9-release-notes.adoc#ocp-4-9-release-notes[release notes]. endif::[] endif::[] @@ -71,7 +71,7 @@ user-provisioned infrastructure on xref:../installing/installing_aws/installing-restricted-networks-aws.adoc#installing-restricted-networks-aws[AWS], xref:../installing/installing_gcp/installing-restricted-networks-gcp.adoc#installing-restricted-networks-gcp[GCP], xref:../installing/installing_vsphere/installing-restricted-networks-vsphere.adoc#installing-restricted-networks-vsphere[vSphere], -xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[IBM Z and LinuxONE with z/VM], xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[IBM Z and LinuxONE with RHEL KVM], xref:../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[IBM Power Systems], +xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z.adoc#installing-restricted-networks-ibm-z[IBM Z and LinuxONE with z/VM], xref:../installing/installing_ibm_z/installing-restricted-networks-ibm-z-kvm.adoc#installing-restricted-networks-ibm-z-kvm[IBM Z and LinuxONE with RHEL KVM], xref:../installing/installing_ibm_power/installing-restricted-networks-ibm-power.adoc#installing-restricted-networks-ibm-power[IBM Power Systems], or xref:../installing/installing_bare_metal/installing-restricted-networks-bare-metal.adoc#installing-restricted-networks-bare-metal[bare metal] does not have full access to the internet, then