From 6326bd313a4e9fa0f15e847825aac448e1e9c7d7 Mon Sep 17 00:00:00 2001 From: vkadapar Date: Fri, 21 Nov 2025 16:57:45 -0500 Subject: [PATCH 01/13] SDCICD-1691: migrate AD_HOC_TEST_IMAGES to TEST_SUITES_YAML with SLACK_CHANNEL --- test/e2e/e2e-template.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/e2e/e2e-template.yml b/test/e2e/e2e-template.yml index 781d7f8..dcb709f 100644 --- a/test/e2e/e2e-template.yml +++ b/test/e2e/e2e-template.yml @@ -36,6 +36,8 @@ parameters: required: false - name: GEMINI_API_KEY required: false + - name: SLACK_CHANNEL + required: false objects: - apiVersion: batch/v1 kind: Job @@ -67,8 +69,10 @@ objects: seccompProfile: type: RuntimeDefault env: - - name: AD_HOC_TEST_IMAGES - value: ${TEST_IMAGE}:${IMAGE_TAG} + - name: TEST_SUITES_YAML + value: |- + - image: ${TEST_IMAGE}:${IMAGE_TAG} + slackChannel: ${SLACK_CHANNEL} - name: OCM_CLIENT_ID value: ${OCM_CLIENT_ID} - name: OCM_CLIENT_SECRET From 403823943cc5d09a2513caa6448ba49830a60aa7 Mon Sep 17 00:00:00 2001 From: ritmun Date: Mon, 24 Nov 2025 10:10:38 -0600 Subject: [PATCH 02/13] bp update --- OWNERS_ALIASES | 4 ++-- boilerplate/_data/last-boilerplate-commit | 2 +- boilerplate/_lib/container-make | 15 ++++++++++++--- boilerplate/openshift/golang-osd-e2e/update | 2 +- .../openshift/golang-osd-operator/OWNERS_ALIASES | 3 ++- .../openshift/golang-osd-operator/standard.mk | 6 ++++++ build/Dockerfile | 2 +- build/Dockerfile.olm-registry | 2 +- test/e2e/osd_example_operator_runner_test.go | 9 --------- test/e2e/osd_example_operator_tests.go | 8 +++++++- 10 files changed, 33 insertions(+), 20 deletions(-) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 27c8909..ca5fda7 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -5,7 +5,9 @@ aliases: srep-functional-team-aurora: - abyrne55 + - AlexSmithGH - dakotalongRH + - eth1030 - joshbranham - luis-falcon - reedcort @@ -65,14 +67,12 @@ aliases: - feichashao - samanthajayasinghe - xiaoyu74 - - Dee-6777 - Tessg22 - smarthall srep-infra-cicd: - ritmun - yiqinzhang - varunraokadaparthi - - christophermancini srep-functional-leads: - abyrne55 - clcollins diff --git a/boilerplate/_data/last-boilerplate-commit b/boilerplate/_data/last-boilerplate-commit index f5222b6..afb18cd 100644 --- a/boilerplate/_data/last-boilerplate-commit +++ b/boilerplate/_data/last-boilerplate-commit @@ -1 +1 @@ -d7285a904eda6cf842ddff8c648dedb223934a75 +b1c2b442985aaf395dde19b28fa5f139563eda05 diff --git a/boilerplate/_lib/container-make b/boilerplate/_lib/container-make index e847e3a..7783458 100755 --- a/boilerplate/_lib/container-make +++ b/boilerplate/_lib/container-make @@ -4,6 +4,7 @@ if [[ "$1" == "-h"* ]] || [[ "$1" == "--h"* ]]; then echo "Usage: $0 {arguments to the real 'make'}" echo "Runs 'make' in the boilerplate backing container." echo "If the command fails, starts a shell in the container so you can debug." + echo "Set NONINTERACTIVE=true (or TRUE) to skip the debug shell and exit with the make return code." exit -1 fi @@ -40,12 +41,20 @@ banner "Running: make $@" $CONTAINER_ENGINE $args make "$@" rc=$? -# If it failed, drop into the container in a shell +# If it failed, check if we should drop into a shell or exit if [[ $rc -ne 0 ]]; then - banner "The 'make' command failed! Starting a shell in the container for debugging. Just 'exit' when done." 
- $CONTAINER_ENGINE $args /bin/bash + # Case-insensitive check for NONINTERACTIVE (true, TRUE, True all work) + if [[ "${NONINTERACTIVE,,}" == "true" ]]; then + banner "The 'make' command failed with exit code $rc. Skipping debug shell (NONINTERACTIVE=${NONINTERACTIVE})." + else + banner "The 'make' command failed! Starting a shell in the container for debugging. Just 'exit' when done." + $CONTAINER_ENGINE $args /bin/bash + fi fi # Finally, remove the container banner "Cleaning up the container" $CONTAINER_ENGINE rm -f $container_id >/dev/null + +# Exit with the return code from make +exit $rc diff --git a/boilerplate/openshift/golang-osd-e2e/update b/boilerplate/openshift/golang-osd-e2e/update index 1b4812f..031e2c1 100755 --- a/boilerplate/openshift/golang-osd-e2e/update +++ b/boilerplate/openshift/golang-osd-e2e/update @@ -21,7 +21,7 @@ OPERATOR_NAME_CAMEL_CASE=${OPERATOR_PROPER_NAME// /} mkdir -p "${E2E_SUITE_DIRECTORY}" -E2E_SUITE_BUILDER_IMAGE=registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.24-openshift-4.20 +E2E_SUITE_BUILDER_IMAGE=registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.24-openshift-4.21 if [[ -n ${KONFLUX_BUILDS} ]]; then E2E_SUITE_BUILDER_IMAGE="brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_9_1.24" fi diff --git a/boilerplate/openshift/golang-osd-operator/OWNERS_ALIASES b/boilerplate/openshift/golang-osd-operator/OWNERS_ALIASES index 97fac93..ca5fda7 100644 --- a/boilerplate/openshift/golang-osd-operator/OWNERS_ALIASES +++ b/boilerplate/openshift/golang-osd-operator/OWNERS_ALIASES @@ -5,7 +5,9 @@ aliases: srep-functional-team-aurora: - abyrne55 + - AlexSmithGH - dakotalongRH + - eth1030 - joshbranham - luis-falcon - reedcort @@ -65,7 +67,6 @@ aliases: - feichashao - samanthajayasinghe - xiaoyu74 - - Dee-6777 - Tessg22 - smarthall srep-infra-cicd: diff --git a/boilerplate/openshift/golang-osd-operator/standard.mk b/boilerplate/openshift/golang-osd-operator/standard.mk index a77c9e8..5bcf92b 100644 --- a/boilerplate/openshift/golang-osd-operator/standard.mk +++ b/boilerplate/openshift/golang-osd-operator/standard.mk @@ -364,6 +364,7 @@ endif # Boilerplate container-make targets. # Runs 'make' in the boilerplate backing container. # If the command fails, starts a shell in the container so you can debug. +# Set NONINTERACTIVE=true to skip the debug shell for CI/automation. .PHONY: container-test container-test: ${BOILERPLATE_CONTAINER_MAKE} test @@ -384,6 +385,11 @@ container-validate: container-coverage: ${BOILERPLATE_CONTAINER_MAKE} coverage +# Run all container-* validation targets in sequence. +# Set NONINTERACTIVE=true to skip debug shells and fail fast for CI/automation. +.PHONY: container-all +container-all: container-lint container-generate container-coverage container-test container-validate + .PHONY: rvmo-bundle rvmo-bundle: RELEASE_BRANCH=$(RELEASE_BRANCH) \ diff --git a/build/Dockerfile b/build/Dockerfile index 2c3d31a..833c2f3 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -6,7 +6,7 @@ COPY . 
/osd-example-operator WORKDIR /osd-example-operator RUN make go-build -FROM registry.access.redhat.com/ubi9/ubi-minimal:9.6-1758184547 +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.7-1763362218 ENV OPERATOR=/usr/local/bin/osd-example-operator \ USER_UID=1001 \ USER_NAME=osd-example-operator diff --git a/build/Dockerfile.olm-registry b/build/Dockerfile.olm-registry index e3dcb11..9979b51 100644 --- a/build/Dockerfile.olm-registry +++ b/build/Dockerfile.olm-registry @@ -4,7 +4,7 @@ COPY ${SAAS_OPERATOR_DIR} manifests RUN initializer --permissive # ubi-micro does not work for clusters with fips enabled unless we make OpenSSL available -FROM registry.access.redhat.com/ubi9/ubi-minimal:9.6-1758184547 +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.7-1763362218 COPY --from=builder /bin/registry-server /bin/registry-server COPY --from=builder /bin/grpc_health_probe /bin/grpc_health_probe diff --git a/test/e2e/osd_example_operator_runner_test.go b/test/e2e/osd_example_operator_runner_test.go index ccd2a04..32d9bcd 100644 --- a/test/e2e/osd_example_operator_runner_test.go +++ b/test/e2e/osd_example_operator_runner_test.go @@ -4,7 +4,6 @@ package osde2etests import ( - "fmt" "os" "path/filepath" "testing" @@ -27,11 +26,3 @@ func TestOsdExampleOperator(t *testing.T) { } RunSpecs(t, "Osd Example Operator", suiteConfig, reporterConfig) } - -// Failing test case added on purpose -var _ = Describe("Intentional Failure Test", func() { - It("should fail on purpose", func() { - fmt.Println("Running Intentional Failure Test") - Expect(true).To(BeFalse(), "This test is designed to fail intentionally") - }) -}) diff --git a/test/e2e/osd_example_operator_tests.go b/test/e2e/osd_example_operator_tests.go index 71363c9..642405d 100644 --- a/test/e2e/osd_example_operator_tests.go +++ b/test/e2e/osd_example_operator_tests.go @@ -5,7 +5,7 @@ package osde2etests import ( "context" - + "fmt" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" ) @@ -14,4 +14,10 @@ var _ = Describe("osd-example-operator", func() { It("asserts success", func(ctx context.Context) { Expect(true).To(BeTrue(), "True should be true") }) + + // Failing test for log analysis demo + It("should fail on purpose", func() { + fmt.Println("Running Intentional Failure Test") + Expect(true).To(BeFalse(), "This test is designed to fail intentionally") + }) }) From 4564cf57d4bd7f79647139ad30f6d86093673569 Mon Sep 17 00:00:00 2001 From: vkadapar Date: Mon, 24 Nov 2025 16:52:51 -0500 Subject: [PATCH 03/13] SDCICD-1620: add log analysis slack notification parameters --- test/e2e/e2e-template.yml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/test/e2e/e2e-template.yml b/test/e2e/e2e-template.yml index dcb709f..e380f02 100644 --- a/test/e2e/e2e-template.yml +++ b/test/e2e/e2e-template.yml @@ -38,6 +38,10 @@ parameters: required: false - name: SLACK_CHANNEL required: false + - name: LOG_ANALYSIS_SLACK_WEBHOOK + required: false + - name: SLACK_NOTIFY + required: false objects: - apiVersion: batch/v1 kind: Job @@ -58,7 +62,7 @@ objects: - --only-health-check-nodes - --skip-destroy-cluster - --skip-must-gather - - --enable-llm-analysis + - --log-analysis-enable - --configs - ${OSDE2E_CONFIGS} securityContext: @@ -95,3 +99,7 @@ objects: value: ${CAD_PAGERDUTY_ROUTING_KEY} - name: GEMINI_API_KEY value: ${GEMINI_API_KEY} + - name: LOG_ANALYSIS_SLACK_WEBHOOK + value: ${LOG_ANALYSIS_SLACK_WEBHOOK} + - name: SLACK_NOTIFY + value: ${SLACK_NOTIFY} From 085769c14826b8bbc29bfb3365819892361906ce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Dec 2025 00:22:44 +0000 Subject: [PATCH 04/13] Bump ubi9/ubi-minimal from 9.7-1763362218 to 9.7-1764794109 in /build Bumps ubi9/ubi-minimal from 9.7-1763362218 to 9.7-1764794109. --- updated-dependencies: - dependency-name: ubi9/ubi-minimal dependency-version: 9.7-1764794109 dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- build/Dockerfile | 2 +- build/Dockerfile.olm-registry | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/build/Dockerfile b/build/Dockerfile index 833c2f3..6f64d3e 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -6,7 +6,7 @@ COPY . 
/osd-example-operator WORKDIR /osd-example-operator RUN make go-build -FROM registry.access.redhat.com/ubi9/ubi-minimal:9.7-1763362218 +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.7-1764794109 ENV OPERATOR=/usr/local/bin/osd-example-operator \ USER_UID=1001 \ USER_NAME=osd-example-operator diff --git a/build/Dockerfile.olm-registry b/build/Dockerfile.olm-registry index 9979b51..d90683a 100644 --- a/build/Dockerfile.olm-registry +++ b/build/Dockerfile.olm-registry @@ -4,7 +4,7 @@ COPY ${SAAS_OPERATOR_DIR} manifests RUN initializer --permissive # ubi-micro does not work for clusters with fips enabled unless we make OpenSSL available -FROM registry.access.redhat.com/ubi9/ubi-minimal:9.7-1763362218 +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.7-1764794109 COPY --from=builder /bin/registry-server /bin/registry-server COPY --from=builder /bin/grpc_health_probe /bin/grpc_health_probe From 4427a76db9b89f90ed9102c501bfa33f2d9f815a Mon Sep 17 00:00:00 2001 From: vkadapar Date: Wed, 17 Dec 2025 12:50:28 -0800 Subject: [PATCH 05/13] SDCICD-1686: Get Osde2e ENV variables as Kubernetes Secrets --- test/e2e/e2e-template.yml | 38 ++++++++++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 10 deletions(-) diff --git a/test/e2e/e2e-template.yml b/test/e2e/e2e-template.yml index e380f02..8c4b650 100644 --- a/test/e2e/e2e-template.yml +++ b/test/e2e/e2e-template.yml @@ -78,15 +78,37 @@ objects: - image: ${TEST_IMAGE}:${IMAGE_TAG} slackChannel: ${SLACK_CHANNEL} - name: OCM_CLIENT_ID - value: ${OCM_CLIENT_ID} + valueFrom: + secretKeyRef: + name: osde2e-secrets + key: OCM_CLIENT_ID - name: OCM_CLIENT_SECRET - value: ${OCM_CLIENT_SECRET} - - name: OCM_CCS - value: ${OCM_CCS} + valueFrom: + secretKeyRef: + name: osde2e-secrets + key: OCM_CLIENT_SECRET - name: AWS_ACCESS_KEY_ID - value: ${AWS_ACCESS_KEY_ID} + valueFrom: + secretKeyRef: + name: osde2e-secrets + key: AWS_ACCESS_KEY_ID - name: AWS_SECRET_ACCESS_KEY - value: ${AWS_SECRET_ACCESS_KEY} + valueFrom: + secretKeyRef: + name: osde2e-secrets + key: AWS_SECRET_ACCESS_KEY + - name: GEMINI_API_KEY + valueFrom: + secretKeyRef: + name: osde2e-secrets + key: GEMINI_API_KEY + - name: LOG_ANALYSIS_SLACK_WEBHOOK + valueFrom: + secretKeyRef: + name: osde2e-secrets + key: LOG_ANALYSIS_SLACK_WEBHOOK + - name: OCM_CCS + value: ${OCM_CCS} - name: CLOUD_PROVIDER_REGION value: ${CLOUD_PROVIDER_REGION} - name: GCP_CREDS_JSON @@ -97,9 +119,5 @@ objects: value: ${USE_EXISTING_CLUSTER} - name: CAD_PAGERDUTY_ROUTING_KEY value: ${CAD_PAGERDUTY_ROUTING_KEY} - - name: GEMINI_API_KEY - value: ${GEMINI_API_KEY} - - name: LOG_ANALYSIS_SLACK_WEBHOOK - value: ${LOG_ANALYSIS_SLACK_WEBHOOK} - name: SLACK_NOTIFY value: ${SLACK_NOTIFY} From ff65a2d45a53f6b2344a62c87589043a67b9c684 Mon Sep 17 00:00:00 2001 From: vkadapar Date: Thu, 18 Dec 2025 11:56:09 -0800 Subject: [PATCH 06/13] SDCICD-1686: update secret key references --- test/e2e/e2e-template.yml | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/test/e2e/e2e-template.yml b/test/e2e/e2e-template.yml index 8c4b650..6d3c4ce 100644 --- a/test/e2e/e2e-template.yml +++ b/test/e2e/e2e-template.yml @@ -80,33 +80,33 @@ objects: - name: OCM_CLIENT_ID valueFrom: secretKeyRef: - name: osde2e-secrets - key: OCM_CLIENT_ID + name: osde2e-ocm-credentials + key: ocm-client-id - name: OCM_CLIENT_SECRET valueFrom: secretKeyRef: - name: osde2e-secrets - key: OCM_CLIENT_SECRET + name: osde2e-ocm-credentials + key: ocm-client-secret - name: AWS_ACCESS_KEY_ID valueFrom: secretKeyRef: - 
name: osde2e-secrets - key: AWS_ACCESS_KEY_ID + name: osde2e-aws-credentials + key: aws-access-key-id - name: AWS_SECRET_ACCESS_KEY valueFrom: secretKeyRef: - name: osde2e-secrets - key: AWS_SECRET_ACCESS_KEY + name: osde2e-aws-credentials + key: aws-secret-access-key - name: GEMINI_API_KEY valueFrom: secretKeyRef: - name: osde2e-secrets - key: GEMINI_API_KEY + name: osde2e-gcp-credentials + key: gemini-api-key - name: LOG_ANALYSIS_SLACK_WEBHOOK valueFrom: secretKeyRef: - name: osde2e-secrets - key: LOG_ANALYSIS_SLACK_WEBHOOK + name: osde2e-ocm-credentials + key: log-analysis-slack-webhook - name: OCM_CCS value: ${OCM_CCS} - name: CLOUD_PROVIDER_REGION @@ -118,6 +118,9 @@ objects: - name: USE_EXISTING_CLUSTER value: ${USE_EXISTING_CLUSTER} - name: CAD_PAGERDUTY_ROUTING_KEY - value: ${CAD_PAGERDUTY_ROUTING_KEY} + valueFrom: + secretKeyRef: + name: osde2e-pagerduty-credentials + key: cad-pagerduty-routing-key - name: SLACK_NOTIFY value: ${SLACK_NOTIFY} From c19625776e08c79c61378d418547fb0ddee76893 Mon Sep 17 00:00:00 2001 From: ritmun Date: Fri, 19 Dec 2025 12:04:40 -0600 Subject: [PATCH 07/13] add test for ocm client --- go.mod | 50 +++++--- go.sum | 152 ++++++++++++++++++------- test/e2e/osd_example_operator_tests.go | 13 +++ 3 files changed, 159 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index 3064ce1..bb326f1 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,11 @@ module github.com/openshift/osd-example-operator -go 1.23.9 +go 1.24.0 require ( - github.com/onsi/ginkgo/v2 v2.21.0 - github.com/onsi/gomega v1.35.1 + github.com/onsi/ginkgo/v2 v2.27.3 + github.com/onsi/gomega v1.38.3 + github.com/openshift/osde2e-common v0.0.0-20251217143616-0fcf893b634b k8s.io/apimachinery v0.32.7 k8s.io/client-go v0.32.0 k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f @@ -13,8 +14,10 @@ require ( require ( cel.dev/expr v0.18.0 // indirect + github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/aymerick/douceur v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect @@ -25,7 +28,7 @@ require ( github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -33,27 +36,35 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect + github.com/golang/glog v1.2.4 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.22.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/css v1.0.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect 
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/microcosm-cc/bluemonday v1.0.26 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/openshift-online/ocm-api-model/clientapi v0.0.440 // indirect + github.com/openshift-online/ocm-api-model/model v0.0.440 // indirect + github.com/openshift-online/ocm-sdk-go v0.1.486 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/client_golang v1.23.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.67.4 // indirect + github.com/prometheus/procfs v0.16.1 // indirect + github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect @@ -68,20 +79,23 @@ require ( go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/mod v0.28.0 // indirect + golang.org/x/net v0.46.0 // indirect + golang.org/x/oauth2 v0.32.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/term v0.36.0 // indirect + golang.org/x/text v0.30.0 // indirect golang.org/x/time v0.7.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/tools v0.37.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.36.10 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index c6644a7..989ed57 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,13 @@ cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator 
v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -30,9 +34,15 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= +github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= +github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= +github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk= +github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE= +github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= @@ -47,8 +57,14 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= +github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= @@ -58,25 +74,51 @@ github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs github.com/google/gnostic-models v0.6.8 
h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= +github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/itchyny/gojq v0.12.7 h1:hYPTpeWfrJ1OT+2j6cvBScbhl0TkdwGM4bc66onUSOQ= +github.com/itchyny/gojq v0.12.7/go.mod h1:ZdvNHVlzPgUf8pgjnuDTmGfHA/21KoutQUJ3An/xNuw= +github.com/itchyny/timefmt-go v0.1.3 h1:7M3LGVDsqcd0VZH2U+x393obrzZisp7C0uEe921iRkU= +github.com/itchyny/timefmt-go v0.1.3/go.mod h1:0osSSCQSASBJMsIZnhAaF1C2fCBTJZXrnj37mG8/c+A= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w= +github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag= +github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw= +github.com/jackc/pgtype 
v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA= +github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= +github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -84,8 +126,16 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= +github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= +github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= +github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= +github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58= +github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -93,26 +143,36 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= -github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= 
-github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= -github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8= +github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= +github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM= +github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= +github.com/openshift-online/ocm-api-model/clientapi v0.0.440 h1:BGWikczo8UuSvzEkoTf6q9iodg/pC7ibfvn5bvuD5c0= +github.com/openshift-online/ocm-api-model/clientapi v0.0.440/go.mod h1:fZwy5HY2URG9nrExvQeXrDU/08TGqZ16f8oymVEN5lo= +github.com/openshift-online/ocm-api-model/model v0.0.440 h1:sfi+fEw3ORh32keJdkE7ZA0g1uCBf457dRg6Qs8yJ6s= +github.com/openshift-online/ocm-api-model/model v0.0.440/go.mod h1:PQIoq6P8Vlb7goOdRMLK8nJY+B7HH0RTqYAa4kyidTE= +github.com/openshift-online/ocm-sdk-go v0.1.486 h1:fhrTGmuLrMkEOoQTOc7hbN7Pmu7n7VatdrMve8qznHM= +github.com/openshift-online/ocm-sdk-go v0.1.486/go.mod h1:HsSAZFf12U8seRMuWpgehW6saw3ufaPnNN+jPg8BPTs= +github.com/openshift/osde2e-common v0.0.0-20251217143616-0fcf893b634b h1:rp7BTSVuOsTrNku6RsnFf+Z2eBNv8SoAqK6FPSPkE/A= +github.com/openshift/osde2e-common v0.0.0-20251217143616-0fcf893b634b/go.mod h1:SIOEWit5VC27o7hHN7QJWtMLPVxjxpLDlDc/dt+sna0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= +github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= 
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -126,8 +186,16 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -154,45 +222,53 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod 
h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= +golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY= +golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.30.0 
h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -205,8 +281,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= -google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/test/e2e/osd_example_operator_tests.go b/test/e2e/osd_example_operator_tests.go index 642405d..86f18ba 100644 --- a/test/e2e/osd_example_operator_tests.go +++ b/test/e2e/osd_example_operator_tests.go @@ -8,6 +8,8 @@ import ( "fmt" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + ocme2e "github.com/openshift/osde2e-common/pkg/clients/ocm" + "os" ) var _ = Describe("osd-example-operator", func() { @@ -15,6 +17,17 @@ var _ = Describe("osd-example-operator", func() { Expect(true).To(BeTrue(), "True should be true") }) + It("should connect to stage ocm client", func(ctx context.Context) { + By("Getting ocm creds") + clientID := os.Getenv("OCM_CLIENT_ID") + clientSecret := os.Getenv("OCM_CLIENT_SECRET") + Expect(clientID).NotTo(BeEmpty(), "OCM_CLIENT_ID must be set") + Expect(clientSecret).NotTo(BeEmpty(), "OCM_CLIENT_SECRET must be set") + ocmEnv := ocme2e.Stage + _, err := ocme2e.New(ctx, "", clientID, clientSecret, ocmEnv) + Expect(err).ShouldNot(HaveOccurred(), "Unable to setup stage OCM Client") + }) + // Failing test for log analysis demo It("should fail on purpose", func() { fmt.Println("Running Intentional Failure Test") From d2f95c8d43df4f57c79a8be37f9e842b7014f44c Mon Sep 17 00:00:00 2001 From: YiqinZhang Date: Tue, 20 Jan 2026 03:11:42 -0500 Subject: [PATCH 08/13] add s3 uploader --- test/e2e/e2e-template.yml | 75 ++++++++++++++++++++++++++++++++++----- 1 file changed, 66 insertions(+), 9 deletions(-) diff --git a/test/e2e/e2e-template.yml b/test/e2e/e2e-template.yml index 6d3c4ce..8b9cc15 100644 --- a/test/e2e/e2e-template.yml +++ b/test/e2e/e2e-template.yml @@ -1,4 +1,5 @@ # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. +# Temporarily add S3 upload capability to the osde2e template for testing apiVersion: template.openshift.io/v1 kind: Template metadata: @@ -42,6 +43,14 @@ parameters: required: false - name: SLACK_NOTIFY required: false + - name: S3_RESULTS_BUCKET + value: 'osde2e-loki-logs' + - name: S3_RESULTS_REGION + value: 'us-east-1' + - name: ENABLE_S3_UPLOAD + value: 'true' + - name: OPERATOR_NAME + value: 'osd-example-operator' objects: - apiVersion: batch/v1 kind: Job @@ -52,19 +61,21 @@ objects: template: spec: restartPolicy: Never + volumes: + - name: test-results + emptyDir: {} containers: - name: osde2e image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest command: - - /osde2e - args: - - test - - --only-health-check-nodes - - --skip-destroy-cluster - - --skip-must-gather - - --log-analysis-enable - - --configs - - ${OSDE2E_CONFIGS} + - /bin/sh + - -c + - | + /osde2e test --only-health-check-nodes --skip-destroy-cluster --skip-must-gather --log-analysis-enable --configs ${OSDE2E_CONFIGS} + TEST_EXIT_CODE=$? + cp -r /test-run-results/* /shared-results/ 2>/dev/null || true + echo "$TEST_EXIT_CODE" > /shared-results/.test-complete + exit $TEST_EXIT_CODE securityContext: runAsNonRoot: true allowPrivilegeEscalation: false @@ -124,3 +135,49 @@ objects: key: cad-pagerduty-routing-key - name: SLACK_NOTIFY value: ${SLACK_NOTIFY} + volumeMounts: + - name: test-results + mountPath: /shared-results + - name: s3-uploader + image: quay.io/app-sre/aws-cli + command: + - /bin/sh + - -c + - | + while [ ! 
-f /shared-results/.test-complete ]; do sleep 10; done + if [ "${ENABLE_S3_UPLOAD}" != "true" ]; then exit 0; fi + DATE=$(date -u +%Y-%m-%d) + S3_PREFIX="test-results/${OPERATOR_NAME}/${DATE}/${IMAGE_TAG}-${JOBID}" + aws s3 sync /shared-results/ "s3://${S3_RESULTS_BUCKET}/${S3_PREFIX}/" --exclude ".test-complete" --no-progress + echo "Uploaded to s3://${S3_RESULTS_BUCKET}/${S3_PREFIX}/" + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault + env: + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: osde2e-aws-credentials + key: aws-access-key-id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: osde2e-aws-credentials + key: aws-secret-access-key + - name: AWS_DEFAULT_REGION + value: ${S3_RESULTS_REGION} + - name: S3_RESULTS_BUCKET + value: ${S3_RESULTS_BUCKET} + - name: OPERATOR_NAME + value: ${OPERATOR_NAME} + - name: IMAGE_TAG + value: ${IMAGE_TAG} + - name: ENABLE_S3_UPLOAD + value: ${ENABLE_S3_UPLOAD} + volumeMounts: + - name: test-results + mountPath: /shared-results From f845fa750b74f8abf8bdca229ff2fa9f1f6987ae Mon Sep 17 00:00:00 2001 From: YiqinZhang Date: Fri, 23 Jan 2026 14:27:10 -0500 Subject: [PATCH 09/13] remove sidecar container --- test/e2e/e2e-template.yml | 77 +++++---------------------------------- 1 file changed, 10 insertions(+), 67 deletions(-) diff --git a/test/e2e/e2e-template.yml b/test/e2e/e2e-template.yml index 8b9cc15..3837c2b 100644 --- a/test/e2e/e2e-template.yml +++ b/test/e2e/e2e-template.yml @@ -1,5 +1,5 @@ # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. -# Temporarily add S3 upload capability to the osde2e template for testing +# Native osde2e S3 upload is enabled - no sidecar needed apiVersion: template.openshift.io/v1 kind: Template metadata: @@ -30,7 +30,7 @@ parameters: value: '' required: true - name: LOG_BUCKET - value: 'osde2e-logs' + value: 'osde2e-loki-logs' - name: USE_EXISTING_CLUSTER value: 'TRUE' - name: CAD_PAGERDUTY_ROUTING_KEY @@ -43,14 +43,6 @@ parameters: required: false - name: SLACK_NOTIFY required: false - - name: S3_RESULTS_BUCKET - value: 'osde2e-loki-logs' - - name: S3_RESULTS_REGION - value: 'us-east-1' - - name: ENABLE_S3_UPLOAD - value: 'true' - - name: OPERATOR_NAME - value: 'osd-example-operator' objects: - apiVersion: batch/v1 kind: Job @@ -61,21 +53,18 @@ objects: template: spec: restartPolicy: Never - volumes: - - name: test-results - emptyDir: {} containers: - name: osde2e image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest command: - - /bin/sh - - -c - - | - /osde2e test --only-health-check-nodes --skip-destroy-cluster --skip-must-gather --log-analysis-enable --configs ${OSDE2E_CONFIGS} - TEST_EXIT_CODE=$? - cp -r /test-run-results/* /shared-results/ 2>/dev/null || true - echo "$TEST_EXIT_CODE" > /shared-results/.test-complete - exit $TEST_EXIT_CODE + - /osde2e + - test + - --only-health-check-nodes + - --skip-destroy-cluster + - --skip-must-gather + - --log-analysis-enable + - --configs + - ${OSDE2E_CONFIGS} securityContext: runAsNonRoot: true allowPrivilegeEscalation: false @@ -135,49 +124,3 @@ objects: key: cad-pagerduty-routing-key - name: SLACK_NOTIFY value: ${SLACK_NOTIFY} - volumeMounts: - - name: test-results - mountPath: /shared-results - - name: s3-uploader - image: quay.io/app-sre/aws-cli - command: - - /bin/sh - - -c - - | - while [ ! 
-f /shared-results/.test-complete ]; do sleep 10; done - if [ "${ENABLE_S3_UPLOAD}" != "true" ]; then exit 0; fi - DATE=$(date -u +%Y-%m-%d) - S3_PREFIX="test-results/${OPERATOR_NAME}/${DATE}/${IMAGE_TAG}-${JOBID}" - aws s3 sync /shared-results/ "s3://${S3_RESULTS_BUCKET}/${S3_PREFIX}/" --exclude ".test-complete" --no-progress - echo "Uploaded to s3://${S3_RESULTS_BUCKET}/${S3_PREFIX}/" - securityContext: - runAsNonRoot: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: RuntimeDefault - env: - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: osde2e-aws-credentials - key: aws-access-key-id - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: osde2e-aws-credentials - key: aws-secret-access-key - - name: AWS_DEFAULT_REGION - value: ${S3_RESULTS_REGION} - - name: S3_RESULTS_BUCKET - value: ${S3_RESULTS_BUCKET} - - name: OPERATOR_NAME - value: ${OPERATOR_NAME} - - name: IMAGE_TAG - value: ${IMAGE_TAG} - - name: ENABLE_S3_UPLOAD - value: ${ENABLE_S3_UPLOAD} - volumeMounts: - - name: test-results - mountPath: /shared-results From b0afb548e819f215ed46d4b72908bec1c4c9ed9c Mon Sep 17 00:00:00 2001 From: YiqinZhang Date: Wed, 28 Jan 2026 20:43:10 -0500 Subject: [PATCH 10/13] [SDCICD-1729] Revert e2e-template.yml to pre-PR#518 state Reverts the s3 upload comment change from PR #518, restoring the template to commit 3f9655c state. --- test/e2e/e2e-template.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/e2e-template.yml b/test/e2e/e2e-template.yml index 3837c2b..6d3c4ce 100644 --- a/test/e2e/e2e-template.yml +++ b/test/e2e/e2e-template.yml @@ -1,5 +1,4 @@ # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. -# Native osde2e S3 upload is enabled - no sidecar needed apiVersion: template.openshift.io/v1 kind: Template metadata: @@ -30,7 +29,7 @@ parameters: value: '' required: true - name: LOG_BUCKET - value: 'osde2e-loki-logs' + value: 'osde2e-logs' - name: USE_EXISTING_CLUSTER value: 'TRUE' - name: CAD_PAGERDUTY_ROUTING_KEY @@ -58,6 +57,7 @@ objects: image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest command: - /osde2e + args: - test - --only-health-check-nodes - --skip-destroy-cluster From 40a87ddd321d8b293fb067c52c6987cdb4137e20 Mon Sep 17 00:00:00 2001 From: YiqinZhang Date: Thu, 30 Oct 2025 14:39:56 -0400 Subject: [PATCH 11/13] Add Tekton Results integration --- test/e2e/TEKTON-RESULTS-README.md | 243 ++++++++++++++++++++++ test/e2e/e2e-tekton-template.yml | 164 +++++++++++++++ test/e2e/osde2e-pipeline.yml | 168 ++++++++++++++++ test/e2e/osde2e-tekton-task.yml | 322 ++++++++++++++++++++++++++++++ 4 files changed, 897 insertions(+) create mode 100644 test/e2e/TEKTON-RESULTS-README.md create mode 100644 test/e2e/e2e-tekton-template.yml create mode 100644 test/e2e/osde2e-pipeline.yml create mode 100644 test/e2e/osde2e-tekton-task.yml diff --git a/test/e2e/TEKTON-RESULTS-README.md b/test/e2e/TEKTON-RESULTS-README.md new file mode 100644 index 0000000..ab17a3b --- /dev/null +++ b/test/e2e/TEKTON-RESULTS-README.md @@ -0,0 +1,243 @@ +# Tekton Results Integration for OSDE2E Tests + +Complete guide for integrating Tekton Results with osde2e testing to enable structured test result collection and enhanced observability. + +## 📦 What's Included + +This integration adds three core files that replace the traditional Kubernetes Job with Tekton Pipelines: + +1. **`osde2e-tekton-task.yml`** (12KB) - Multi-step Task with structured result collection +2. 
**`osde2e-pipeline.yml`** (4.9KB) - Pipeline orchestration
3. **`e2e-tekton-template.yml`** (6.3KB) - OpenShift Template (referenced by app-interface)

## 🔄 Required Changes in app-interface

Only **2 changes** needed in `osde2e-focus-test.yaml`:

### Change 1: Update Template Path
```yaml
resourceTemplates:
- name: saas-oeo-e2e-test
  url: https://github.com/openshift/osd-example-operator
  path: /test/e2e/e2e-tekton-template.yml  # Changed from e2e-template.yml
```

### Change 2: Update Resource Types
```yaml
managedResourceTypes:
- PipelineRun.tekton.dev                  # Changed from: Job
- ServiceAccount
- Role.rbac.authorization.k8s.io          # Changed from: ClusterRole
- RoleBinding.rbac.authorization.k8s.io   # Changed from: ClusterRoleBinding
- PersistentVolumeClaim                   # New: for workspace storage
```

**Everything else unchanged**: parameters, credentials, test logic, promotion channels, Slack notifications.

## ✅ Key Benefits

| Feature | Job (Current) | Tekton + Results (New) |
|---------|--------------|------------------------|
| **Result Storage** | Pod logs (temporary) | Structured Results (persistent) |
| **Observability** | Single log stream | Multi-step visibility |
| **Result Format** | Plain text | Structured (status, logs, summary, JUnit) |
| **Historical Query** | Not supported | Supported (via Results API) |
| **UI** | Basic Pod logs | Enhanced (Pipeline graph, Results panel) |

### Structured Results Captured
- **test-status**: PASS/FAIL status
- **test-logs**: Complete test execution logs
- **test-summary**: Test summary (timing, config, status)
- **test-results**: JUnit XML format

## 🚀 Quick Start

### 1. Deploy Tekton Resources
```bash
cd test/e2e

# Deploy Task and Pipeline
oc apply -f osde2e-tekton-task.yml -n <namespace>
oc apply -f osde2e-pipeline.yml -n <namespace>

# Verify deployment
oc get task osde2e-test-task -n <namespace>
oc get pipeline osde2e-test-pipeline -n <namespace>
```

### 2. Run a Test
```bash
# Using the template (app-interface style)
oc process -f e2e-tekton-template.yml \
  -p OSDE2E_CONFIGS="rosa,sts,int,ad-hoc-image" \
  -p TEST_IMAGE="quay.io/redhat-services-prod/oeo-cicada-tenant/osd-example-operator-e2e" \
  -p IMAGE_TAG="latest" \
  -p CLOUD_PROVIDER_REGION="us-east-1" \
  -p NAMESPACE="<namespace>" \
  | oc apply -f -
```
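For orientation, the object this `oc process` invocation renders is a PipelineRun rather than a Job. Below is a minimal sketch of that shape; the ServiceAccount, workspace, and PVC names are illustrative assumptions, not the template's actual values — see `e2e-tekton-template.yml` for the real definition.

```yaml
# Rough sketch of the PipelineRun the template renders.
# osde2e-runner, shared-workspace, and osde2e-workspace are assumed
# names for illustration only.
apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
  generateName: osde2e-test-run-
spec:
  pipelineRef:
    name: osde2e-test-pipeline         # deployed in step 1 above
  taskRunTemplate:
    serviceAccountName: osde2e-runner  # assumed SA name
  workspaces:
    - name: shared-workspace           # assumed workspace name
      persistentVolumeClaim:
        claimName: osde2e-workspace    # assumed PVC name
```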
View Results +```bash +# Get latest PipelineRun +PIPELINERUN=$(oc get pipelinerun -n --sort-by=.metadata.creationTimestamp -o jsonpath='{.items[-1].metadata.name}') + +# View Results +oc get pipelinerun $PIPELINERUN -n -o jsonpath='{.status.results}' | jq + +# Expected output: +# [{"name": "final-test-status", "value": "PASS"}] + +# View detailed logs +tkn pipelinerun logs $PIPELINERUN -f -n +``` + +## 📖 Complete Documentation + +For detailed information, see: +- **Complete Verification Guide (Chinese)**: [TEKTON-RESULTS-VERIFICATION.md](./TEKTON-RESULTS-VERIFICATION.md) + - Prerequisites and environment checks + - How Tekton Results works (architecture diagram) + - Step-by-step deployment instructions + - Multiple methods to view and verify Results + - OpenShift Console navigation guide + - Tekton Results API usage + - Troubleshooting common issues + - Complete verification checklist + +## 🔍 Quick Verification + +### Check Results in PipelineRun +```bash +# View PipelineRun status +oc get pipelinerun $PIPELINERUN -n + +# View Results +oc get pipelinerun $PIPELINERUN -n -o jsonpath='{.status.results}' | jq + +# View TaskRun Results (detailed) +TASKRUN=$(oc get taskrun -n -l tekton.dev/pipelineRun=$PIPELINERUN -o jsonpath='{.items[0].metadata.name}') +oc get taskrun $TASKRUN -n -o jsonpath='{.status.results}' | jq + +# Expected: test-status, test-logs, test-summary, test-results +``` + +### View in OpenShift Console +``` +1. Navigate to: Pipelines → PipelineRuns +2. Click on your PipelineRun +3. See Results panel showing test-status +4. Click "Logs" tab to view step-by-step execution +``` + +## 🐛 Troubleshooting + +### PipelineRun Stuck in Pending +```bash +# Check events +oc describe pipelinerun $PIPELINERUN -n | grep -A 10 Events + +# Common causes: PVC creation, ServiceAccount permissions, Task/Pipeline missing +``` + +### No Results Visible +```bash +# Check TaskRun logs +tkn taskrun logs $TASKRUN -n + +# Verify Results annotations +oc get pipelinerun $PIPELINERUN -n -o jsonpath='{.metadata.annotations}' | grep results +``` + +### Results Empty or Incomplete +```bash +# Tekton Results have a 4KB limit per result +# For large logs, only summary is stored in Results +# Complete logs available via: tkn pipelinerun logs +``` + +## 📊 Migration Path + +### Phase 1: osd-example-operator PR (Current) +- Add 3 new files to repository +- No changes to existing tests + +### Phase 2: app-interface PR (After merge) +- Update 2 fields in osde2e-focus-test.yaml +- Test in int01 → stage02 → production + +### Rollback +If issues occur, revert the app-interface path change: +```yaml +path: /test/e2e/e2e-template.yml # Back to Job template +``` + +## 📊 How It Works + +``` +┌─────────────────────────────────────────┐ +│ app-interface SaaS File │ +│ path: /test/e2e/e2e-tekton-template.yml│ +└──────────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────┐ +│ Template Creates: │ +│ - PipelineRun (with Results annotations)│ +│ - ServiceAccount │ +│ - Role/RoleBinding │ +│ - PersistentVolumeClaim │ +└──────────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────┐ +│ Pipeline Executes: │ +│ - osde2e-test-task │ +│ 1. Fix permissions │ +│ 2. Setup environment │ +│ 3. Run osde2e tests │ +│ 4. 
Collect results (structured) │ +└──────────────────┬──────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────┐ +│ Results Stored: │ +│ - TaskRun.status.results[] (4 results) │ +│ - PipelineRun.status.results[] (1 result)│ +│ - (Optional) Results API database │ +└─────────────────────────────────────────┘ +``` + +## 📊 Success Indicators + +When you see this output, integration is successful: + +```bash +$ oc get pipelinerun $PIPELINERUN -n +NAME SUCCEEDED REASON STARTTIME COMPLETIONTIME +osde2e-...-1730300000 True Succeeded 5m 2m + +$ oc get pipelinerun $PIPELINERUN -n -o jsonpath='{.status.results}' | jq +[ + { + "name": "final-test-status", + "value": "PASS" + } +] + +$ oc get taskrun $TASKRUN -n -o jsonpath='{.status.results[*].name}' +test-status test-logs test-summary test-results +``` + +## 🔗 Additional Resources + +- [Tekton Results Official Docs](https://tekton.dev/docs/pipelines/results/) +- [OpenShift Pipelines Docs](https://docs.openshift.com/pipelines/) +- [Complete Verification Guide (Chinese)](./TEKTON-RESULTS-VERIFICATION.md) + +--- + +**Need Help?** +- See [TEKTON-RESULTS-VERIFICATION.md](./TEKTON-RESULTS-VERIFICATION.md) for detailed step-by-step guide +- Contact via JIRA: SDCICD-1672 + diff --git a/test/e2e/e2e-tekton-template.yml b/test/e2e/e2e-tekton-template.yml new file mode 100644 index 0000000..5fd1834 --- /dev/null +++ b/test/e2e/e2e-tekton-template.yml @@ -0,0 +1,164 @@ +# OpenShift Template for OSDE2E Testing with Tekton Pipeline +# This template creates a PipelineRun instead of a Job for enhanced observability +apiVersion: template.openshift.io/v1 +kind: Template +metadata: + name: osde2e-focused-tests-tekton + labels: + app: osde2e + component: testing + version: tekton-v1 + annotations: + description: "OSDE2E focused tests using Tekton Pipeline for enhanced observability" + iconClass: "icon-openshift" + tags: "osde2e,testing,tekton,pipeline" + openshift.io/display-name: "OSDE2E Tekton Tests" + openshift.io/documentation-url: "https://github.com/openshift/osde2e" + openshift.io/support-url: "https://github.com/openshift/osd-example-operator" +parameters: + - name: OSDE2E_CONFIGS + displayName: "OSDE2E Configurations" + description: "Configuration string for osde2e (e.g., rosa,sts,int,ad-hoc-image)" + required: true + - name: TEST_IMAGE + displayName: "Test Image" + description: "The test image to run" + required: true + - name: OCM_CLIENT_ID + displayName: "OCM Client ID" + description: "OCM client ID for authentication" + required: false + - name: OCM_CLIENT_SECRET + displayName: "OCM Client Secret" + description: "OCM client secret for authentication" + required: false + - name: OCM_CCS + displayName: "OCM CCS" + description: "OCM CCS configuration" + required: false + - name: AWS_ACCESS_KEY_ID + displayName: "AWS Access Key ID" + description: "AWS access key ID" + required: false + - name: AWS_SECRET_ACCESS_KEY + displayName: "AWS Secret Access Key" + description: "AWS secret access key" + required: false + - name: CLOUD_PROVIDER_REGION + displayName: "Cloud Provider Region" + description: "Cloud provider region" + required: false + value: "us-east-1" + - name: GCP_CREDS_JSON + displayName: "GCP Credentials JSON" + description: "GCP credentials in JSON format" + required: false + - name: JOBID + displayName: "Job ID" + description: "Unique identifier for this test run" + generate: expression + from: "[0-9a-z]{7}" + - name: IMAGE_TAG + displayName: "Image Tag" + description: "Tag for the test image" + value: '' + required: true + - name: 
LOG_BUCKET + displayName: "Log Bucket" + description: "S3 bucket for storing logs" + value: 'osde2e-logs' + - name: USE_EXISTING_CLUSTER + displayName: "Use Existing Cluster" + description: "Whether to use an existing cluster" + value: 'TRUE' + - name: CLUSTER_ID + displayName: "Cluster ID" + description: "OpenShift cluster ID to test against" + required: false + value: '' + - name: CAD_PAGERDUTY_ROUTING_KEY + displayName: "PagerDuty Routing Key" + description: "PagerDuty routing key for alerts" + required: false +objects: + # PersistentVolumeClaim for test workspace + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: osde2e-test-workspace-${JOBID} + labels: + app: osde2e + component: testing + job-id: ${JOBID} + test-image-tag: ${IMAGE_TAG} + annotations: + description: "Workspace for OSDE2E test execution and result storage" + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp3-csi + + # PipelineRun for test execution + # Note: Uses the default 'pipeline' ServiceAccount which has anyuid SCC + - apiVersion: tekton.dev/v1 + kind: PipelineRun + metadata: + name: osde2e-osd-example-operator-${IMAGE_TAG}-${JOBID} + labels: + app: osde2e + component: testing + job-id: ${JOBID} + test-image-tag: ${IMAGE_TAG} + app.kubernetes.io/managed-by: tekton-pipelines # Required for ClusterLogForwarder + annotations: + osde2e-configs: ${OSDE2E_CONFIGS} + # Enable Tekton Results collection + results.tekton.dev/record: "true" + results.tekton.dev/log: "true" + # Additional metadata for observability + osde2e.openshift.io/config: ${OSDE2E_CONFIGS} + osde2e.openshift.io/test-image: ${TEST_IMAGE}:${IMAGE_TAG} + osde2e.openshift.io/region: ${CLOUD_PROVIDER_REGION} + osde2e.openshift.io/job-id: ${JOBID} + description: "OSDE2E test execution for osd-example-operator" + spec: + serviceAccountName: pipeline # Use the default pipeline SA which has anyuid SCC + pipelineRef: + name: osde2e-test-pipeline + params: + - name: OSDE2E_CONFIGS + value: ${OSDE2E_CONFIGS} + - name: TEST_IMAGE + value: ${TEST_IMAGE} + - name: IMAGE_TAG + value: ${IMAGE_TAG} + - name: OCM_CLIENT_ID + value: ${OCM_CLIENT_ID} + - name: OCM_CLIENT_SECRET + value: ${OCM_CLIENT_SECRET} + - name: AWS_ACCESS_KEY_ID + value: ${AWS_ACCESS_KEY_ID} + - name: AWS_SECRET_ACCESS_KEY + value: ${AWS_SECRET_ACCESS_KEY} + - name: CLOUD_PROVIDER_REGION + value: ${CLOUD_PROVIDER_REGION} + - name: LOG_BUCKET + value: ${LOG_BUCKET} + - name: USE_EXISTING_CLUSTER + value: ${USE_EXISTING_CLUSTER} + - name: CLUSTER_ID + value: ${CLUSTER_ID} + - name: CAD_PAGERDUTY_ROUTING_KEY + value: ${CAD_PAGERDUTY_ROUTING_KEY} + workspaces: + - name: test-workspace + persistentVolumeClaim: + claimName: osde2e-test-workspace-${JOBID} + # Timeout for the entire pipeline (3 hours like the original Job) + timeouts: + pipeline: "3h0m0s" + tasks: "2h45m0s" + finally: "15m0s" diff --git a/test/e2e/osde2e-pipeline.yml b/test/e2e/osde2e-pipeline.yml new file mode 100644 index 0000000..f6d6112 --- /dev/null +++ b/test/e2e/osde2e-pipeline.yml @@ -0,0 +1,168 @@ +apiVersion: tekton.dev/v1 +kind: Pipeline +metadata: + name: osde2e-test-pipeline + labels: + app.kubernetes.io/version: "0.1" + annotations: + tekton.dev/pipelines.minVersion: "0.17.0" + tekton.dev/categories: Testing + tekton.dev/tags: osde2e,testing,e2e,pipeline + tekton.dev/displayName: "OSDE2E Test Pipeline" + tekton.dev/platforms: "linux/amd64" +spec: + description: >- + This pipeline orchestrates osde2e testing with comprehensive result collection + for Tekton 
Results observability. It runs tests, collects logs and JUnit results, + and provides structured output for monitoring and analysis. + + params: + - name: OSDE2E_CONFIGS + type: string + description: Configuration string for osde2e (e.g., "rosa,sts,int,ad-hoc-image") + - name: TEST_IMAGE + type: string + description: The test image to run + - name: IMAGE_TAG + type: string + description: Tag for the test image + default: "latest" + - name: OCM_CLIENT_ID + type: string + description: OCM client ID for authentication + default: "" + - name: OCM_CLIENT_SECRET + type: string + description: OCM client secret for authentication + default: "" + - name: AWS_ACCESS_KEY_ID + type: string + description: AWS access key ID + default: "" + - name: AWS_SECRET_ACCESS_KEY + type: string + description: AWS secret access key + default: "" + - name: CLOUD_PROVIDER_REGION + type: string + description: Cloud provider region + default: "us-east-1" + - name: LOG_BUCKET + type: string + description: S3 bucket for logs + default: "osde2e-logs" + - name: USE_EXISTING_CLUSTER + type: string + description: Whether to use existing cluster + default: "TRUE" + - name: CLUSTER_ID + type: string + description: OpenShift cluster ID to test against + default: "" + - name: CAD_PAGERDUTY_ROUTING_KEY + type: string + description: PagerDuty routing key for alerts + default: "" + + workspaces: + - name: test-workspace + description: Shared workspace for test execution and result storage + + results: + - name: final-test-status + description: Final test status from the pipeline + value: $(tasks.osde2e-test.results.test-status) + - name: test-summary + description: Test execution summary + value: $(tasks.osde2e-test.results.test-summary) + + tasks: + - name: osde2e-test + taskRef: + name: osde2e-test-task + params: + - name: OSDE2E_CONFIGS + value: $(params.OSDE2E_CONFIGS) + - name: TEST_IMAGE + value: $(params.TEST_IMAGE) + - name: IMAGE_TAG + value: $(params.IMAGE_TAG) + - name: OCM_CLIENT_ID + value: $(params.OCM_CLIENT_ID) + - name: OCM_CLIENT_SECRET + value: $(params.OCM_CLIENT_SECRET) + - name: AWS_ACCESS_KEY_ID + value: $(params.AWS_ACCESS_KEY_ID) + - name: AWS_SECRET_ACCESS_KEY + value: $(params.AWS_SECRET_ACCESS_KEY) + - name: CLOUD_PROVIDER_REGION + value: $(params.CLOUD_PROVIDER_REGION) + - name: LOG_BUCKET + value: $(params.LOG_BUCKET) + - name: USE_EXISTING_CLUSTER + value: $(params.USE_EXISTING_CLUSTER) + - name: CLUSTER_ID + value: $(params.CLUSTER_ID) + - name: CAD_PAGERDUTY_ROUTING_KEY + value: $(params.CAD_PAGERDUTY_ROUTING_KEY) + workspaces: + - name: test-results + workspace: test-workspace + + finally: + - name: cleanup-and-report + params: + - name: TEST_STATUS + value: $(tasks.osde2e-test.results.test-status) + - name: OSDE2E_CONFIGS + value: $(params.OSDE2E_CONFIGS) + taskSpec: + params: + - name: TEST_STATUS + - name: OSDE2E_CONFIGS + workspaces: + - name: test-results + mountPath: /workspace/test-results + steps: + - name: final-report + image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest + script: | + #!/bin/bash + set -euo pipefail + + echo "=== Final Pipeline Report ===" + echo "Pipeline execution completed" + echo "Test Status: $(params.TEST_STATUS)" + echo "Configuration: $(params.OSDE2E_CONFIGS)" + echo "Timestamp: $(date -Iseconds)" + + # Log final status to workspace + { + echo "=== Pipeline Final Report ===" + echo "Execution completed at: $(date -Iseconds)" + echo "Final Status: $(params.TEST_STATUS)" + echo "Configuration: $(params.OSDE2E_CONFIGS)" + echo "" + echo "Available 
result files:" + find /workspace/test-results -type f -name "*.log" -o -name "*.xml" | sort + } >> /workspace/test-results/logs/pipeline-final-report.log + + if [ "$(params.TEST_STATUS)" = "PASS" ]; then + echo "✅ Pipeline completed successfully" + exit 0 + else + echo "❌ Pipeline completed with test failures" + # Don't fail the finally task, just report + exit 0 + fi + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault + workspaces: + - name: test-results + workspace: test-workspace + diff --git a/test/e2e/osde2e-tekton-task.yml b/test/e2e/osde2e-tekton-task.yml new file mode 100644 index 0000000..1f6083b --- /dev/null +++ b/test/e2e/osde2e-tekton-task.yml @@ -0,0 +1,322 @@ +apiVersion: tekton.dev/v1 +kind: Task +metadata: + name: osde2e-test-task + labels: + app.kubernetes.io/version: "0.1" + annotations: + tekton.dev/pipelines.minVersion: "0.17.0" + tekton.dev/categories: Testing + tekton.dev/tags: osde2e,testing,e2e + tekton.dev/displayName: "OSDE2E Test Task" + tekton.dev/platforms: "linux/amd64" +spec: + description: >- + This task runs osde2e tests and collects results for Tekton Results observability. + It captures stdout logs, JUnit XML results, and test status for pipeline observability. + + params: + - name: OSDE2E_CONFIGS + type: string + description: Configuration string for osde2e (e.g., "rosa,sts,int,ad-hoc-image") + - name: TEST_IMAGE + type: string + description: The test image to run + - name: IMAGE_TAG + type: string + description: Tag for the test image + default: "latest" + - name: OCM_CLIENT_ID + type: string + description: OCM client ID for authentication + default: "" + - name: OCM_CLIENT_SECRET + type: string + description: OCM client secret for authentication + default: "" + - name: AWS_ACCESS_KEY_ID + type: string + description: AWS access key ID + default: "" + - name: AWS_SECRET_ACCESS_KEY + type: string + description: AWS secret access key + default: "" + - name: CLOUD_PROVIDER_REGION + type: string + description: Cloud provider region + default: "us-east-1" + - name: LOG_BUCKET + type: string + description: S3 bucket for logs + default: "osde2e-logs" + - name: USE_EXISTING_CLUSTER + type: string + description: Whether to use existing cluster + default: "TRUE" + - name: CLUSTER_ID + type: string + description: OpenShift cluster ID to test against + default: "" + - name: CAD_PAGERDUTY_ROUTING_KEY + type: string + description: PagerDuty routing key for alerts + default: "" + + results: + - name: test-results + description: JUnit XML test results + - name: test-logs + description: Test execution logs + - name: test-status + description: Overall test status (PASS/FAIL) + - name: test-summary + description: Test execution summary + + workspaces: + - name: test-results + description: Workspace for storing test results and logs + mountPath: /workspace/test-results + + steps: + - name: setup-test-environment + image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest + script: | + #!/bin/bash + set -euo pipefail + + echo "Setting up test environment..." 
+ + # Create workspace directories (no root needed) + mkdir -p $(workspaces.test-results.path)/junit + mkdir -p $(workspaces.test-results.path)/logs + mkdir -p $(workspaces.test-results.path)/reports + mkdir -p $(workspaces.test-results.path)/shared + + echo "Workspace directories created" + + # Verify directories exist and are writable + ls -la $(workspaces.test-results.path)/ + + # Log environment information + echo "=== Test Environment Setup ===" | tee $(workspaces.test-results.path)/logs/setup.log + echo "OSDE2E_CONFIGS: $(params.OSDE2E_CONFIGS)" | tee -a $(workspaces.test-results.path)/logs/setup.log + echo "TEST_IMAGE: $(params.TEST_IMAGE):$(params.IMAGE_TAG)" | tee -a $(workspaces.test-results.path)/logs/setup.log + echo "CLOUD_PROVIDER_REGION: $(params.CLOUD_PROVIDER_REGION)" | tee -a $(workspaces.test-results.path)/logs/setup.log + echo "USE_EXISTING_CLUSTER: $(params.USE_EXISTING_CLUSTER)" | tee -a $(workspaces.test-results.path)/logs/setup.log + echo "Setup completed successfully" | tee -a $(workspaces.test-results.path)/logs/setup.log + + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault + + - name: run-osde2e-tests + image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest + script: | + #!/bin/bash + set -euo pipefail + + echo "Starting osde2e test execution..." + + # Set test start time + TEST_START_TIME=$(date -Iseconds) + echo "Test started at: $TEST_START_TIME" | tee $(workspaces.test-results.path)/logs/test-execution.log + + # Run osde2e tests with enhanced logging and result collection + TEST_EXIT_CODE=0 + /osde2e test \ + --only-health-check-nodes \ + --skip-destroy-cluster \ + --skip-must-gather \ + --configs $(params.OSDE2E_CONFIGS) \ + 2>&1 | tee $(workspaces.test-results.path)/logs/osde2e-full.log || TEST_EXIT_CODE=$? 
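      # NOTE (added comment): with `set -euo pipefail` above, a plain failing
      # pipeline would abort this script; the `|| TEST_EXIT_CODE=$?` suffix
      # swallows the failure while capturing the osde2e exit status (pipefail
      # propagates it through tee), so PASS/FAIL can still be recorded below.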
+ + # Set test end time + TEST_END_TIME=$(date -Iseconds) + echo "Test completed at: $TEST_END_TIME" | tee -a $(workspaces.test-results.path)/logs/test-execution.log + echo "Test exit code: $TEST_EXIT_CODE" | tee -a $(workspaces.test-results.path)/logs/test-execution.log + + # Determine test status + if [ $TEST_EXIT_CODE -eq 0 ]; then + echo "PASS" > $(results.test-status.path) + echo "✅ All tests passed" | tee -a $(workspaces.test-results.path)/logs/test-execution.log + else + echo "FAIL" > $(results.test-status.path) + echo "❌ Tests failed with exit code: $TEST_EXIT_CODE" | tee -a $(workspaces.test-results.path)/logs/test-execution.log + fi + + # Create test summary + echo "=== Test Execution Summary ===" > $(workspaces.test-results.path)/logs/summary.log + echo "Start Time: $TEST_START_TIME" >> $(workspaces.test-results.path)/logs/summary.log + echo "End Time: $TEST_END_TIME" >> $(workspaces.test-results.path)/logs/summary.log + echo "Exit Code: $TEST_EXIT_CODE" >> $(workspaces.test-results.path)/logs/summary.log + echo "Status: $(cat $(results.test-status.path))" >> $(workspaces.test-results.path)/logs/summary.log + echo "Config: $(params.OSDE2E_CONFIGS)" >> $(workspaces.test-results.path)/logs/summary.log + + # Copy summary to result + cp $(workspaces.test-results.path)/logs/summary.log $(results.test-summary.path) + + # Exit with the original test exit code to maintain pipeline behavior + exit $TEST_EXIT_CODE + + env: + # Test configuration + - name: AD_HOC_TEST_IMAGES + value: "$(params.TEST_IMAGE):$(params.IMAGE_TAG)" + - name: CLOUD_PROVIDER_REGION + value: "$(params.CLOUD_PROVIDER_REGION)" + - name: LOG_BUCKET + value: "$(params.LOG_BUCKET)" + - name: USE_EXISTING_CLUSTER + value: "$(params.USE_EXISTING_CLUSTER)" + - name: CLUSTER_ID + value: "$(params.CLUSTER_ID)" + - name: CAD_PAGERDUTY_ROUTING_KEY + value: "$(params.CAD_PAGERDUTY_ROUTING_KEY)" + # osde2e output configuration + - name: REPORT_DIR + value: "$(workspaces.test-results.path)/reports" + - name: SHARED_DIR + value: "$(workspaces.test-results.path)/shared" + # Credentials from Secret and Parameters (Secret takes precedence) + - name: OCM_CLIENT_ID + valueFrom: + secretKeyRef: + name: osde2e-credentials + key: OCM_CLIENT_ID + optional: true + - name: OCM_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: osde2e-credentials + key: OCM_CLIENT_SECRET + optional: true + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: osde2e-credentials + key: AWS_ACCESS_KEY_ID + optional: true + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: osde2e-credentials + key: AWS_SECRET_ACCESS_KEY + optional: true + # Fallback to parameters if secret is not available + - name: OCM_CLIENT_ID_PARAM + value: "$(params.OCM_CLIENT_ID)" + - name: OCM_CLIENT_SECRET_PARAM + value: "$(params.OCM_CLIENT_SECRET)" + - name: AWS_ACCESS_KEY_ID_PARAM + value: "$(params.AWS_ACCESS_KEY_ID)" + - name: AWS_SECRET_ACCESS_KEY_PARAM + value: "$(params.AWS_SECRET_ACCESS_KEY)" + + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault + + - name: collect-test-results + image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest + script: | + #!/bin/bash + set -euo pipefail + + echo "Collecting and processing test results..." 
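      # NOTE (added comment): this step emits two Tekton results: test-results
      # (merged JUnit XML found under REPORT_DIR) and test-logs (a consolidated
      # view of setup, execution, and osde2e output). Keep the default Tekton
      # result size cap (~4KB) in mind; full logs stay in the workspace.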
+ + # Process JUnit XML results from osde2e REPORT_DIR + JUNIT_FOUND=false + + # Check for JUnit XML in reports directory (osde2e default location) + if [ -d "$(workspaces.test-results.path)/reports" ]; then + echo "Searching for JUnit XML files in reports directory..." + find $(workspaces.test-results.path)/reports -name "*.xml" -type f > /tmp/junit_files.txt 2>/dev/null || true + + if [ -s /tmp/junit_files.txt ]; then + echo "Found JUnit XML files, merging results..." + cat $(cat /tmp/junit_files.txt) > $(results.test-results.path) 2>/dev/null && JUNIT_FOUND=true + + # Log JUnit summary + echo "=== JUnit Results Summary ===" >> $(workspaces.test-results.path)/logs/summary.log + echo "JUnit XML files found:" >> $(workspaces.test-results.path)/logs/summary.log + cat /tmp/junit_files.txt >> $(workspaces.test-results.path)/logs/summary.log + fi + fi + + # Also check for junit.xml in the main reports directory (common osde2e pattern) + if [ -f "$(workspaces.test-results.path)/reports/junit.xml" ]; then + echo "Found junit.xml in reports directory" + cp $(workspaces.test-results.path)/reports/junit.xml $(results.test-results.path) + JUNIT_FOUND=true + fi + + # If no JUnit results found, create empty result + if [ "$JUNIT_FOUND" = "false" ]; then + echo "No JUnit XML results found, creating empty result" + echo '' > $(results.test-results.path) + echo "No JUnit XML files found" >> $(workspaces.test-results.path)/logs/summary.log + fi + + # Consolidate all logs + echo "Consolidating test logs..." + { + echo "=== OSDE2E Test Execution Logs ===" + echo "Generated at: $(date -Iseconds)" + echo "" + + if [ -f "$(workspaces.test-results.path)/logs/setup.log" ]; then + echo "=== Setup Logs ===" + cat $(workspaces.test-results.path)/logs/setup.log + echo "" + fi + + if [ -f "$(workspaces.test-results.path)/logs/test-execution.log" ]; then + echo "=== Test Execution Summary ===" + cat $(workspaces.test-results.path)/logs/test-execution.log + echo "" + fi + + if [ -f "$(workspaces.test-results.path)/logs/osde2e-full.log" ]; then + echo "=== Full OSDE2E Output ===" + cat $(workspaces.test-results.path)/logs/osde2e-full.log + fi + + # Include osde2e generated logs from REPORT_DIR + if [ -d "$(workspaces.test-results.path)/reports" ]; then + echo "" + echo "=== OSDE2E Generated Files ===" + find $(workspaces.test-results.path)/reports -type f -name "*.log" 2>/dev/null | while read logfile; do + if [ -f "$logfile" ]; then + echo "" + echo "=== $(basename $logfile) ===" + cat "$logfile" 2>/dev/null || echo "Could not read $logfile" + fi + done + + # Include test_output.log if it exists (osde2e default) + if [ -f "$(workspaces.test-results.path)/reports/test_output.log" ]; then + echo "" + echo "=== OSDE2E Test Output Log ===" + cat $(workspaces.test-results.path)/reports/test_output.log + fi + fi + } > $(results.test-logs.path) + + echo "Test result collection completed successfully" + + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: RuntimeDefault From 35fdd8b1b5934a300738b79e9c4ed960d4b99910 Mon Sep 17 00:00:00 2001 From: YiqinZhang Date: Wed, 3 Dec 2025 00:13:59 -0500 Subject: [PATCH 12/13] add s3 storage --- test/e2e/ClusterLogForwarder.yaml | 78 +++ test/e2e/TEKTON-RESULTS-README.md | 243 ------- test/e2e/doc/MANUAL-SETUP-GUIDE.md | 879 ++++++++++++++++++++++++ test/e2e/doc/QUERY-RESULTS-GUIDE.md | 487 +++++++++++++ test/e2e/doc/QUICK-REFERENCE.md | 124 ++++ test/e2e/doc/README.md | 218 ++++++ 
test/e2e/doc/REQUIRED-CONFIG-FILES.md | 657 ++++++++++++++++++ test/e2e/doc/STORAGE-GUIDE.md | 346 ++++++++++ test/e2e/doc/TROUBLESHOOTING.md | 341 +++++++++ test/e2e/loki-s3-policy.json | 22 + test/e2e/osde2e-pipeline.yml | 65 +- test/e2e/osde2e-tekton-task.yml | 46 +- test/e2e/run-with-credentials.sh | 369 ++++++++++ test/e2e/setup-complete-tekton-stack.sh | 849 +++++++++++++++++++++++ test/e2e/tekton-results-api.sh | 398 +++++++++++ test/e2e/tekton-results-reader.yaml | 38 + test/e2e/upload-to-s3-task.yml | 158 +++++ test/e2e/view-pipeline-logs.sh | 349 ++++++++++ 18 files changed, 5364 insertions(+), 303 deletions(-) create mode 100644 test/e2e/ClusterLogForwarder.yaml delete mode 100644 test/e2e/TEKTON-RESULTS-README.md create mode 100644 test/e2e/doc/MANUAL-SETUP-GUIDE.md create mode 100644 test/e2e/doc/QUERY-RESULTS-GUIDE.md create mode 100644 test/e2e/doc/QUICK-REFERENCE.md create mode 100644 test/e2e/doc/README.md create mode 100644 test/e2e/doc/REQUIRED-CONFIG-FILES.md create mode 100644 test/e2e/doc/STORAGE-GUIDE.md create mode 100644 test/e2e/doc/TROUBLESHOOTING.md create mode 100644 test/e2e/loki-s3-policy.json create mode 100755 test/e2e/run-with-credentials.sh create mode 100755 test/e2e/setup-complete-tekton-stack.sh create mode 100755 test/e2e/tekton-results-api.sh create mode 100644 test/e2e/tekton-results-reader.yaml create mode 100644 test/e2e/upload-to-s3-task.yml create mode 100755 test/e2e/view-pipeline-logs.sh diff --git a/test/e2e/ClusterLogForwarder.yaml b/test/e2e/ClusterLogForwarder.yaml new file mode 100644 index 0000000..5114aa6 --- /dev/null +++ b/test/e2e/ClusterLogForwarder.yaml @@ -0,0 +1,78 @@ +--- +# ClusterLogForwarder for Tekton Pipeline Logs to Loki +# Forwards only Tekton-managed application logs to LokiStack +apiVersion: observability.openshift.io/v1 +kind: ClusterLogForwarder +metadata: + name: tekton-to-loki + namespace: openshift-logging + labels: + app: tekton + component: logging +spec: + # Management state + managementState: Managed + + # Service Account (will be auto-created by the operator) + serviceAccount: + name: collector + + # Input: Filter only Tekton-managed application logs + inputs: + - name: tekton-logs + type: application + application: + selector: + matchExpressions: + # Match pods managed by Tekton Pipelines + - key: app.kubernetes.io/managed-by + operator: In + values: + - tekton-pipelines + - pipelinesascode.tekton.dev + # Optional: Limit to specific namespaces + namespaces: + - osde2e-tekton + + # Output: LokiStack in osde2e-tekton namespace + outputs: + - name: loki-output + type: lokiStack + lokiStack: + # Target LokiStack + target: + name: osde2e-loki + namespace: osde2e-tekton + + # Authentication + authentication: + token: + from: serviceAccount + + # TLS Configuration + tls: + ca: + key: service-ca.crt + configMapName: openshift-service-ca.crt + + # Label configuration for better log organization + labelKeys: + application: + # Don't use global labels, use custom ones + ignoreGlobal: true + labelKeys: + - log_type + - kubernetes.namespace_name + - kubernetes.pod_name + - kubernetes.container_name + - openshift_cluster_id + + # Pipeline: Connect input to output + pipelines: + - name: tekton-to-loki-pipeline + inputRefs: + - tekton-logs + outputRefs: + - loki-output + labels: + pipeline: tekton diff --git a/test/e2e/TEKTON-RESULTS-README.md b/test/e2e/TEKTON-RESULTS-README.md deleted file mode 100644 index ab17a3b..0000000 --- a/test/e2e/TEKTON-RESULTS-README.md +++ /dev/null @@ -1,243 +0,0 @@ -# Tekton Results 
Integration for OSDE2E Tests - -Complete guide for integrating Tekton Results with osde2e testing to enable structured test result collection and enhanced observability. - -## 📦 What's Included - -This integration adds three core files that replace the traditional Kubernetes Job with Tekton Pipelines: - -1. **`osde2e-tekton-task.yml`** (12KB) - Multi-step Task with structured result collection -2. **`osde2e-pipeline.yml`** (4.9KB) - Pipeline orchestration -3. **`e2e-tekton-template.yml`** (6.3KB) - OpenShift Template (referenced by app-interface) - -## 🔄 Required Changes in app-interface - -Only **2 changes** needed in `osde2e-focus-test.yaml`: - -### Change 1: Update Template Path -```yaml -resourceTemplates: -- name: saas-oeo-e2e-test - url: https://github.com/openshift/osd-example-operator - path: /test/e2e/e2e-tekton-template.yml # Changed from e2e-template.yml -``` - -### Change 2: Update Resource Types -```yaml -managedResourceTypes: -- PipelineRun.tekton.dev # Changed from: Job -- ServiceAccount -- Role.rbac.authorization.k8s.io # Changed from: ClusterRole -- RoleBinding.rbac.authorization.k8s.io # Changed from: ClusterRoleBinding -- PersistentVolumeClaim # New: for workspace storage -``` - -**Everything else unchanged**: parameters, credentials, test logic, promotion channels, Slack notifications. - -## ✅ Key Benefits - -| Feature | Job (Current) | Tekton + Results (New) | -|---------|--------------|------------------------| -| **Result Storage** | Pod logs (temporary) | Structured Results (persistent) | -| **Observability** | Single log stream | Multi-step visibility | -| **Result Format** | Plain text | Structured (status, logs, summary, JUnit) | -| **Historical Query** | Not supported | Supported (via Results API) | -| **UI** | Basic Pod logs | Enhanced (Pipeline graph, Results panel) | - -### Structured Results Captured -- **test-status**: PASS/FAIL status -- **test-logs**: Complete test execution logs -- **test-summary**: Test summary (timing, config, status) -- **test-results**: JUnit XML format - -## 🚀 Quick Start - -### 1. Deploy Tekton Resources -```bash -cd test/e2e - -# Deploy Task and Pipeline -oc apply -f osde2e-tekton-task.yml -n -oc apply -f osde2e-pipeline.yml -n - -# Verify deployment -oc get task osde2e-test-task -n -oc get pipeline osde2e-test-pipeline -n -``` - -### 2. Run a Test -```bash -# Using the template (app-interface style) -oc process -f e2e-tekton-template.yml \ - -p OSDE2E_CONFIGS="rosa,sts,int,ad-hoc-image" \ - -p TEST_IMAGE="quay.io/redhat-services-prod/oeo-cicada-tenant/osd-example-operator-e2e" \ - -p IMAGE_TAG="latest" \ - -p CLOUD_PROVIDER_REGION="us-east-1" \ - -p NAMESPACE="" \ - | oc apply -f - -``` - -### 3. 
View Results -```bash -# Get latest PipelineRun -PIPELINERUN=$(oc get pipelinerun -n --sort-by=.metadata.creationTimestamp -o jsonpath='{.items[-1].metadata.name}') - -# View Results -oc get pipelinerun $PIPELINERUN -n -o jsonpath='{.status.results}' | jq - -# Expected output: -# [{"name": "final-test-status", "value": "PASS"}] - -# View detailed logs -tkn pipelinerun logs $PIPELINERUN -f -n -``` - -## 📖 Complete Documentation - -For detailed information, see: -- **Complete Verification Guide (Chinese)**: [TEKTON-RESULTS-VERIFICATION.md](./TEKTON-RESULTS-VERIFICATION.md) - - Prerequisites and environment checks - - How Tekton Results works (architecture diagram) - - Step-by-step deployment instructions - - Multiple methods to view and verify Results - - OpenShift Console navigation guide - - Tekton Results API usage - - Troubleshooting common issues - - Complete verification checklist - -## 🔍 Quick Verification - -### Check Results in PipelineRun -```bash -# View PipelineRun status -oc get pipelinerun $PIPELINERUN -n - -# View Results -oc get pipelinerun $PIPELINERUN -n -o jsonpath='{.status.results}' | jq - -# View TaskRun Results (detailed) -TASKRUN=$(oc get taskrun -n -l tekton.dev/pipelineRun=$PIPELINERUN -o jsonpath='{.items[0].metadata.name}') -oc get taskrun $TASKRUN -n -o jsonpath='{.status.results}' | jq - -# Expected: test-status, test-logs, test-summary, test-results -``` - -### View in OpenShift Console -``` -1. Navigate to: Pipelines → PipelineRuns -2. Click on your PipelineRun -3. See Results panel showing test-status -4. Click "Logs" tab to view step-by-step execution -``` - -## 🐛 Troubleshooting - -### PipelineRun Stuck in Pending -```bash -# Check events -oc describe pipelinerun $PIPELINERUN -n | grep -A 10 Events - -# Common causes: PVC creation, ServiceAccount permissions, Task/Pipeline missing -``` - -### No Results Visible -```bash -# Check TaskRun logs -tkn taskrun logs $TASKRUN -n - -# Verify Results annotations -oc get pipelinerun $PIPELINERUN -n -o jsonpath='{.metadata.annotations}' | grep results -``` - -### Results Empty or Incomplete -```bash -# Tekton Results have a 4KB limit per result -# For large logs, only summary is stored in Results -# Complete logs available via: tkn pipelinerun logs -``` - -## 📊 Migration Path - -### Phase 1: osd-example-operator PR (Current) -- Add 3 new files to repository -- No changes to existing tests - -### Phase 2: app-interface PR (After merge) -- Update 2 fields in osde2e-focus-test.yaml -- Test in int01 → stage02 → production - -### Rollback -If issues occur, revert the app-interface path change: -```yaml -path: /test/e2e/e2e-template.yml # Back to Job template -``` - -## 📊 How It Works - -``` -┌─────────────────────────────────────────┐ -│ app-interface SaaS File │ -│ path: /test/e2e/e2e-tekton-template.yml│ -└──────────────────┬──────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────┐ -│ Template Creates: │ -│ - PipelineRun (with Results annotations)│ -│ - ServiceAccount │ -│ - Role/RoleBinding │ -│ - PersistentVolumeClaim │ -└──────────────────┬──────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────┐ -│ Pipeline Executes: │ -│ - osde2e-test-task │ -│ 1. Fix permissions │ -│ 2. Setup environment │ -│ 3. Run osde2e tests │ -│ 4. 
Collect results (structured) │ -└──────────────────┬──────────────────────┘ - │ - ▼ -┌─────────────────────────────────────────┐ -│ Results Stored: │ -│ - TaskRun.status.results[] (4 results) │ -│ - PipelineRun.status.results[] (1 result)│ -│ - (Optional) Results API database │ -└─────────────────────────────────────────┘ -``` - -## 📊 Success Indicators - -When you see this output, integration is successful: - -```bash -$ oc get pipelinerun $PIPELINERUN -n -NAME SUCCEEDED REASON STARTTIME COMPLETIONTIME -osde2e-...-1730300000 True Succeeded 5m 2m - -$ oc get pipelinerun $PIPELINERUN -n -o jsonpath='{.status.results}' | jq -[ - { - "name": "final-test-status", - "value": "PASS" - } -] - -$ oc get taskrun $TASKRUN -n -o jsonpath='{.status.results[*].name}' -test-status test-logs test-summary test-results -``` - -## 🔗 Additional Resources - -- [Tekton Results Official Docs](https://tekton.dev/docs/pipelines/results/) -- [OpenShift Pipelines Docs](https://docs.openshift.com/pipelines/) -- [Complete Verification Guide (Chinese)](./TEKTON-RESULTS-VERIFICATION.md) - ---- - -**Need Help?** -- See [TEKTON-RESULTS-VERIFICATION.md](./TEKTON-RESULTS-VERIFICATION.md) for detailed step-by-step guide -- Contact via JIRA: SDCICD-1672 - diff --git a/test/e2e/doc/MANUAL-SETUP-GUIDE.md b/test/e2e/doc/MANUAL-SETUP-GUIDE.md new file mode 100644 index 0000000..93131d9 --- /dev/null +++ b/test/e2e/doc/MANUAL-SETUP-GUIDE.md @@ -0,0 +1,879 @@ +# Complete Manual Setup Guide + +Step-by-step manual guide for setting up OSDE2E Tekton Pipeline from scratch. + +--- + +## Table of Contents + +1. [Prerequisites](#1-prerequisites) +2. [Install OpenShift Pipelines](#2-install-openshift-pipelines) +3. [Enable Tekton Results](#3-enable-tekton-results) +4. [Install Loki Operator](#4-install-loki-operator) +5. [Configure S3 Storage](#5-configure-s3-storage) +6. [Deploy LokiStack](#6-deploy-lokistack) +7. [Configure ClusterLogForwarder](#7-configure-clusterlogforwarder) +8. [Deploy Tekton Resources](#8-deploy-tekton-resources) +9. [Create Credentials Secret](#9-create-credentials-secret) +10. [Run Tests](#10-run-tests) +11. [Retrieve Test Results](#11-retrieve-test-results) +12. [Troubleshooting](#12-troubleshooting) + +--- + +## 1. Prerequisites + +### 1.1 Required Tools + +```bash +# Check oc CLI +oc version +# Expected: Client Version: 4.x.x + +# Check jq (JSON processor) +jq --version +# Expected: jq-1.6 or higher + +# Check AWS CLI (for S3 setup) +aws --version +# Expected: aws-cli/2.x.x +``` + +**Install if missing:** +```bash +# oc CLI - Download from OpenShift Console → ? → Command Line Tools +# jq - brew install jq (macOS) or apt install jq (Linux) +# aws CLI - brew install awscli (macOS) or pip install awscli +``` + +### 1.2 Cluster Access + +```bash +# Login to OpenShift cluster +oc login https://api.:6443 -u -p +# Or use token: +oc login --token= --server=https://api.:6443 + +# Verify login +oc whoami +# Expected: your-username + +# Verify cluster connection +oc whoami --show-server +# Expected: https://api.:6443 +``` + +### 1.3 Check Admin Permissions + +```bash +# Check if you have cluster-admin +oc auth can-i '*' '*' --all-namespaces +# Expected: yes +``` + +--- + +## 2. Install OpenShift Pipelines + +### 2.1 Check if Already Installed + +```bash +oc get tektonconfig config +``` + +If you see output, skip to [Step 3](#3-enable-tekton-results). If "not found", continue below. 
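Before moving on, you can optionally confirm the operator install state from the CSV side as well (the commands below assume the standard OpenShift Pipelines install in `openshift-operators`; adjust if your cluster differs):

```bash
# Look for the OpenShift Pipelines CSV in Succeeded phase
oc get csv -n openshift-operators | grep -i pipelines

# If the TektonConfig CR already exists, check its Ready condition
oc get tektonconfig config \
  -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
```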
+ +### 2.2 Create Subscription + +```bash +cat < /tmp/loki-s3-policy.json << 'EOF' +{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:DeleteObject", + "s3:ListBucket", + "s3:GetBucketLocation" + ], + "Resource": [ + "arn:aws:s3:::osde2e-loki-logs", + "arn:aws:s3:::osde2e-loki-logs/*" + ] + }] +} +EOF +``` + +Apply policy: +```bash +aws iam put-user-policy \ + --user-name loki-storage-user \ + --policy-name LokiS3Access \ + --policy-document file:///tmp/loki-s3-policy.json + +# Verify policy attached +aws iam get-user-policy --user-name loki-storage-user --policy-name LokiS3Access +``` + +--- + +## 6. Deploy LokiStack + +### 6.1 Create Namespace + +```bash +oc new-project osde2e-tekton || oc project osde2e-tekton +``` + +### 6.2 Create S3 Credentials Secret + +```bash +# Set your credentials (from Step 5.2) +export AWS_ACCESS_KEY_ID="AKIA..." +export AWS_SECRET_ACCESS_KEY="your-secret-key" +export S3_BUCKET_NAME="osde2e-loki-logs" +export AWS_REGION="us-east-1" + +# Create secret +oc create secret generic loki-s3-credentials \ + --from-literal=access_key_id="$AWS_ACCESS_KEY_ID" \ + --from-literal=access_key_secret="$AWS_SECRET_ACCESS_KEY" \ + --from-literal=bucketnames="$S3_BUCKET_NAME" \ + --from-literal=endpoint="https://s3.${AWS_REGION}.amazonaws.com" \ + --from-literal=region="$AWS_REGION" \ + -n osde2e-tekton + +# Verify +oc get secret loki-s3-credentials -n osde2e-tekton +``` + +### 6.3 Check Node Resources + +```bash +# ⚠️ IMPORTANT: LokiStack size must match SINGLE node capacity, not total cluster! + +# Check worker nodes +oc get nodes -l node-role.kubernetes.io/worker \ + -o custom-columns='NAME:.metadata.name,CPU:.status.allocatable.cpu,MEM:.status.allocatable.memory' + +# Example output: +# NAME CPU MEM +# ip-10-0-22-97.ec2.internal 3500m 29140160Ki (~28Gi) +# ip-10-0-30-127.ec2.internal 3500m 12649672Ki (~12Gi) +``` + +**LokiStack Size Selection:** + +| Node CPU | Node Memory | Recommended Size | +|----------|-------------|------------------| +| < 4 CPU | < 16Gi | `1x.demo` ✅ | +| 4-6 CPU | 16-32Gi | `1x.extra-small` | +| >= 7 CPU | >= 32Gi | `1x.small` | + +### 6.4 Create LokiStack + +```bash +# Use 1x.demo for most dev/test clusters +cat < 📄 **Required Files:** See [REQUIRED-CONFIG-FILES.md](./REQUIRED-CONFIG-FILES.md) for detailed file descriptions. + +### 8.1 Navigate to E2E Directory + +```bash +cd /path/to/osd-example-operator/test/e2e + +# Verify required files exist +ls -la osde2e-tekton-task.yml upload-to-s3-task.yml osde2e-pipeline.yml e2e-tekton-template.yml +``` + +### 8.2 Apply Task + +```bash +# Apply main test Task +oc apply -f osde2e-tekton-task.yml -n osde2e-tekton + +# Apply S3 upload Task +oc apply -f upload-to-s3-task.yml -n osde2e-tekton + +# Verify +oc get task -n osde2e-tekton +# Expected: +# NAME AGE +# osde2e-test-task xx +# upload-to-s3-task xx +``` + +### 8.3 Apply Pipeline + +```bash +oc apply -f osde2e-pipeline.yml -n osde2e-tekton + +# Verify +oc get pipeline -n osde2e-tekton +# Expected: +# NAME AGE +# osde2e-test-pipeline xx +``` + +--- + +## 9. Create Credentials Secret + +### 9.1 Gather Credentials + +You need: +- **OCM_CLIENT_ID**: Usually "cloud-services" +- **OCM_CLIENT_SECRET**: Your OCM offline token +- **AWS_ACCESS_KEY_ID**: For ROSA provider +- **AWS_SECRET_ACCESS_KEY**: For ROSA provider + +**Get OCM Token:** +1. Visit https://console.redhat.com/openshift/ +2. Click user menu → API Tokens → Load Token +3. 
Or from `~/.config/ocm/ocm.json` after `rosa login` + +### 9.2 Create Secret + +```bash +# Set your credentials +export OCM_CLIENT_ID="cloud-services" +export OCM_CLIENT_SECRET="your-ocm-offline-token" +export AWS_ACCESS_KEY_ID="AKIA..." +export AWS_SECRET_ACCESS_KEY="your-aws-secret" + +# Create secret +oc create secret generic osde2e-credentials \ + --from-literal=OCM_CLIENT_ID="$OCM_CLIENT_ID" \ + --from-literal=OCM_CLIENT_SECRET="$OCM_CLIENT_SECRET" \ + --from-literal=AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \ + --from-literal=AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \ + -n osde2e-tekton + +# Verify +oc get secret osde2e-credentials -n osde2e-tekton +``` + +--- + +## 10. Run Tests + +### 10.1 Get Cluster ID + +```bash +# Method 1: From rosa CLI +rosa list clusters +# Note the cluster ID (e.g., abc123def456) + +# Method 2: From current cluster +oc get clusterversion -o jsonpath='{.items[0].spec.clusterID}' +``` + +### 10.2 Process and Apply Template + +```bash +# Set variables +export CLUSTER_ID="your-cluster-id" +export TEST_IMAGE="quay.io/redhat-services-prod/oeo-cicada-tenant/osd-example-operator-e2e" +export IMAGE_TAG="latest" +export OSDE2E_CONFIGS="rosa,sts,int,ad-hoc-image" + +# Process template +oc process -f e2e-tekton-template.yml \ + -p OSDE2E_CONFIGS="$OSDE2E_CONFIGS" \ + -p TEST_IMAGE="$TEST_IMAGE" \ + -p IMAGE_TAG="$IMAGE_TAG" \ + -p CLUSTER_ID="$CLUSTER_ID" \ + -p S3_RESULTS_BUCKET="osde2e-loki-logs" \ + | oc apply -f - -n osde2e-tekton +``` + +### 10.3 Monitor Test Progress + +```bash +# Get PipelineRun name +PIPELINERUN=$(oc get pipelinerun -n osde2e-tekton \ + --sort-by=.metadata.creationTimestamp \ + -o jsonpath='{.items[-1].metadata.name}') + +echo "PipelineRun: $PIPELINERUN" + +# Watch status +oc get pipelinerun $PIPELINERUN -n osde2e-tekton -w + +# Or view live logs +oc logs -f -l tekton.dev/pipelineRun=$PIPELINERUN -n osde2e-tekton --all-containers +``` + +--- + +## 11. Retrieve Test Results + +### 11.1 Check PipelineRun Status + +```bash +# Get latest PipelineRun +PIPELINERUN=$(oc get pipelinerun -n osde2e-tekton \ + --sort-by=.metadata.creationTimestamp \ + -o jsonpath='{.items[-1].metadata.name}') + +# Check status +oc get pipelinerun $PIPELINERUN -n osde2e-tekton + +# Expected: +# NAME SUCCEEDED REASON STARTTIME COMPLETIONTIME +# osde2e-osd-example-operator-latest-xxx True Succeeded 10m 5m +``` + +### 11.2 Get S3 Pre-signed URLs + +```bash +# View upload task logs for pre-signed URLs +oc logs -l tekton.dev/pipelineRun=$PIPELINERUN,tekton.dev/task=upload-to-s3-task \ + -n osde2e-tekton --tail=100 + +# Expected output includes: +# 📄 osde2e-full.log: +# https://osde2e-loki-logs.s3.us-east-1.amazonaws.com/test-results/2025-12-03/...?X-Amz-... 
+``` + +### 11.3 View Pod Logs (While Running) + +```bash +# List pods +oc get pods -n osde2e-tekton -l tekton.dev/pipelineRun=$PIPELINERUN + +# View specific pod logs +oc logs $PIPELINERUN-osde2e-test-pod -n osde2e-tekton + +# Follow logs in real-time +oc logs -f $PIPELINERUN-osde2e-test-pod -n osde2e-tekton +``` + +### 11.4 Access Workspace PVC (After Pod Deleted) + +```bash +# Find PVC +PVC_NAME=$(oc get pvc -n osde2e-tekton -l tekton.dev/pipelineRun=$PIPELINERUN \ + -o jsonpath='{.items[0].metadata.name}') + +echo "PVC: $PVC_NAME" + +# Create debug pod +oc run pvc-reader --rm -it --restart=Never \ + --image=registry.access.redhat.com/ubi9/ubi-minimal \ + --overrides="{ + \"spec\": { + \"containers\": [{ + \"name\": \"pvc-reader\", + \"image\": \"registry.access.redhat.com/ubi9/ubi-minimal\", + \"command\": [\"sh\"], + \"stdin\": true, + \"tty\": true, + \"volumeMounts\": [{ + \"name\": \"test-results\", + \"mountPath\": \"/workspace\" + }] + }], + \"volumes\": [{ + \"name\": \"test-results\", + \"persistentVolumeClaim\": { + \"claimName\": \"$PVC_NAME\" + } + }] + } + }" \ + -n osde2e-tekton + +# Inside pod: +# ls /workspace/ +# cat /workspace/logs/osde2e-full.log +# cat /workspace/reports/test_output.log +``` + +### 11.5 Query Tekton Results API + +```bash +# Create token +TOKEN=$(oc create token tekton-results-reader -n openshift-pipelines --duration=1h 2>/dev/null || oc whoami -t) + +# Get Results API endpoint +RESULTS_SVC="https://tekton-results-api-service.openshift-pipelines.svc:8080" + +# Query results (via port-forward) +oc port-forward svc/tekton-results-api-service 8080:8080 -n openshift-pipelines & +PF_PID=$! +sleep 3 + +curl -sk -H "Authorization: Bearer $TOKEN" \ + "https://localhost:8080/apis/results.tekton.dev/v1alpha2/parents/osde2e-tekton/results" \ + | jq '.results | .[-5:] | .[].name' + +kill $PF_PID +``` + +### 11.6 Download from S3 (CLI) + +```bash +# List results +aws s3 ls s3://osde2e-loki-logs/test-results/ --recursive | head -20 + +# Download specific test results +aws s3 cp s3://osde2e-loki-logs/test-results/2025-12-03/$PIPELINERUN-xxx/ \ + ./results/ --recursive + +# Generate pre-signed URL manually +aws s3 presign s3://osde2e-loki-logs/test-results/2025-12-03/$PIPELINERUN-xxx/logs/osde2e-full.log \ + --expires-in 604800 +``` + +### 11.7 Using opc CLI (Recommended) + +```bash +# Install opc if not available +# See doc/OPC-CLI-SETUP.md + +# List PipelineRuns +opc pipelinerun list -n osde2e-tekton + +# View logs (works even after pod deleted, via Tekton Results) +opc pipelinerun logs $PIPELINERUN -n osde2e-tekton + +# View specific task logs +opc pipelinerun logs $PIPELINERUN -n osde2e-tekton --task osde2e-test +``` + +--- + +## 12. 
Troubleshooting

### Issue: Loki Operator Channel Not Found

**Symptom:** `no operators found in channel stable`

**Solution:** Use a versioned channel:
```bash
# Check available channels
oc get packagemanifest loki-operator -n openshift-marketplace \
  -o jsonpath='{.status.channels[*].name}'
# Use: stable-6.4 (not just "stable")
```

### Issue: LokiStack Pods Pending

**Symptom:** Ingester/Compactor pods stuck in Pending

**Solution:** Reduce the LokiStack size:
```bash
oc patch lokistack osde2e-loki -n osde2e-tekton \
  --type=merge -p '{"spec":{"size":"1x.demo"}}'

oc delete pod osde2e-loki-ingester-0 -n osde2e-tekton --force --grace-period=0
```

### Issue: S3 Access Denied

**Symptom:** `AccessDenied: User xxx is not authorized to perform: s3:PutObject`

**Solution:** Add the IAM permissions (see Step 5.3)

### Issue: Tekton Results Not Working

**Symptom:** `tekton-results` pods not found

**Solution:** Check the correct namespace (`openshift-pipelines`, NOT `tekton-results`):
```bash
oc get pods -n openshift-pipelines | grep tekton-results
```

### Issue: AWS Credentials Missing

**Symptom:** `AWS_ACCESS_KEY_ID is not set`

**Solution:** Ensure the secret contains AWS credentials:
```bash
oc get secret osde2e-credentials -n osde2e-tekton -o yaml | grep AWS
# Should see: AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
```

---

## Quick Reference

### Essential Commands

```bash
# Check components
oc get tektonconfig config                         # Tekton Pipelines
oc get pods -n openshift-pipelines | grep results  # Tekton Results
oc get lokistack -n osde2e-tekton                  # LokiStack
oc get clusterlogforwarder -n openshift-logging    # Log Forwarder

# Run test
oc process -f e2e-tekton-template.yml -p CLUSTER_ID=xxx | oc apply -f - -n osde2e-tekton

# View results
oc logs <pipelinerun-name>-upload-results-to-s3-pod -n osde2e-tekton  # S3 URLs
opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton              # Full logs
```

diff --git a/test/e2e/doc/QUERY-RESULTS-GUIDE.md b/test/e2e/doc/QUERY-RESULTS-GUIDE.md
new file mode 100644
index 0000000..6b8b88a
--- /dev/null
+++ b/test/e2e/doc/QUERY-RESULTS-GUIDE.md
@@ -0,0 +1,487 @@
# Query Results Guide

Methods to retrieve test logs and results from all storage layers.

---

## Quick Reference

| Goal | Command |
|------|---------|
| Get S3 download URLs | `oc logs <pipelinerun-name>-upload-results-to-s3-pod -n osde2e-tekton` |
| View historical logs | `opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton` |
| View live logs | `oc logs -f <pod-name> -n osde2e-tekton` |
| Query via API | `./tekton-results-api.sh query` |

---

## Method 1: opc CLI (Recommended)

The `opc` CLI is the easiest way to access Tekton Results and logs. 
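Because it reads from the Tekton Results API rather than from live pods, it keeps working after the pods and their PVCs are gone. A quick sanity check that the API backing `opc` is reachable (service name as used by the port-forward examples earlier in this guide):

```bash
# Verify the Results API service exists before relying on opc
oc get svc tekton-results-api-service -n openshift-pipelines
```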
### Installation

```bash
# macOS
brew tap openshift-pipelines/pipelines-as-code
brew install opc

# Linux
curl -LO https://github.com/openshift-pipelines/opc/releases/latest/download/opc_linux_amd64.tar.gz
tar xzf opc_linux_amd64.tar.gz
sudo mv opc /usr/local/bin/

# Verify
opc version
```

### Basic Commands

```bash
# List PipelineRuns
opc pipelinerun list -n osde2e-tekton

# View PipelineRun logs (works even after pod deletion)
opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton

# View specific TaskRun logs
opc taskrun logs <taskrun-name> -n osde2e-tekton

# Follow logs in real-time
opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton --follow

# View specific task in pipeline
opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton --task osde2e-test
```

### Filtering and Searching

```bash
# Filter by label
opc pipelinerun list -n osde2e-tekton -l app=osde2e

# Limit results
opc pipelinerun list -n osde2e-tekton --limit 10

# Search in logs
opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton | grep -i "error\|fail"

# Save to file
opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton > logs.txt
```

---

## Method 2: oc CLI (Live Pods Only)

Use this method while pods are still running or recently completed.

```bash
# List pods for a PipelineRun
oc get pods -n osde2e-tekton -l tekton.dev/pipelineRun=<pipelinerun-name>

# View all container logs
oc logs <pod-name> -n osde2e-tekton --all-containers

# Follow logs
oc logs -f <pod-name> -n osde2e-tekton

# View specific container
oc logs <pod-name> -c step-run-osde2e-tests -n osde2e-tekton
```

**Note:** Pod logs are deleted when pods are removed. Use `opc` for historical access.

---

## Method 3: Tekton Results API

Direct API access for automation and custom queries.

### Quick Query

```bash
# Use the helper script
./tekton-results-api.sh query

# Or list formatted results
./tekton-results-api.sh list
```

### Manual API Access

```bash
# Port-forward to API
oc port-forward svc/tekton-results-api-service 8080:8080 -n openshift-pipelines &

# Get token
TOKEN=$(oc whoami -t)

# List all results
curl -sk -H "Authorization: Bearer $TOKEN" \
  "https://localhost:8080/apis/results.tekton.dev/v1alpha2/parents/osde2e-tekton/results" \
  | jq '.results | length'

# Get specific result
curl -sk -H "Authorization: Bearer $TOKEN" \
  "https://localhost:8080/apis/results.tekton.dev/v1alpha2/parents/osde2e-tekton/results/<result-name>"

# List records (TaskRuns)
curl -sk -H "Authorization: Bearer $TOKEN" \
  "https://localhost:8080/apis/results.tekton.dev/v1alpha2/parents/osde2e-tekton/results/-/records"
```

### API Endpoints

| Endpoint | Description |
|----------|-------------|
| `GET /parents/-/results` | List all results (all namespaces) |
| `GET /parents/{ns}/results` | List results in namespace |
| `GET /parents/{ns}/results/{name}` | Get specific result |
| `GET /parents/{ns}/results/-/records` | List all records |

---

## Method 4: PostgreSQL (Advanced)

Direct database access for debugging and understanding how Tekton Results stores data. 
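Since this is the live database backing the Results API, treat it as read-only. A cheap safeguard for ad-hoc queries (a sketch; connection details follow below) is to force the session into read-only mode before running anything:

```bash
# Open a psql session that rejects writes, then run a sample query
oc exec tekton-results-postgres-0 -n openshift-pipelines -- \
  psql -U result -d tekton-results \
  -c "SET default_transaction_read_only = on;" \
  -c "SELECT COUNT(*) FROM results;"
```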
+ +### Connection Information + +| Property | Value | +|----------|-------| +| **Pod Name** | `tekton-results-postgres-0` | +| **Namespace** | `openshift-pipelines` | +| **Service** | `tekton-results-postgres-service` | +| **Port** | 5432 (internal), 32576 (NodePort) | +| **Database** | `tekton-results` | +| **Username** | `result` | + +### Get Credentials + +```bash +# Get database name +oc get configmap tekton-results-postgres -n openshift-pipelines \ + -o jsonpath='{.data.POSTGRES_DB}' + +# Get username +oc get secret tekton-results-postgres -n openshift-pipelines \ + -o jsonpath='{.data.POSTGRES_USER}' | base64 -d + +# Get password +oc get secret tekton-results-postgres -n openshift-pipelines \ + -o jsonpath='{.data.POSTGRES_PASSWORD}' | base64 -d +``` + +### Connect to Database + +**Option 1: Enter Pod Directly (Recommended)** + +```bash +# Enter PostgreSQL pod +oc rsh -n openshift-pipelines tekton-results-postgres-0 + +# Connect to database +psql -U result -d tekton-results +``` + +**Option 2: Remote SQL Execution** + +```bash +# Execute single command +oc exec tekton-results-postgres-0 -n openshift-pipelines -- \ + psql -U result -d tekton-results -c "SELECT COUNT(*) FROM results;" + +# Execute SQL file +oc exec tekton-results-postgres-0 -n openshift-pipelines -- \ + psql -U result -d tekton-results -f /tmp/query.sql +``` + +**Option 3: Port Forward (for external tools)** + +```bash +# Forward port +oc port-forward svc/tekton-results-postgres-service 5432:5432 -n openshift-pipelines & + +# Connect with psql client +PGPASSWORD=$(oc get secret tekton-results-postgres -n openshift-pipelines \ + -o jsonpath='{.data.POSTGRES_PASSWORD}' | base64 -d) \ + psql -h localhost -U result -d tekton-results +``` + +--- + +### Database Schema + +Tekton Results uses two main tables: + +``` +┌─────────────────────────────────────────────────────────────┐ +│ PostgreSQL Database │ +│ (tekton-results) │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────────────────┐ ┌─────────────────────────┐│ +│ │ results │ │ records ││ +│ ├─────────────────────────┤ ├─────────────────────────┤│ +│ │ name (PK) │ │ name (PK) ││ +│ │ namespace │ │ result_name (FK) ││ +│ │ type │ │ type ││ +│ │ data (JSONB) │ │ data (JSONB) ││ +│ │ create_time │ │ create_time ││ +│ │ update_time │ │ update_time ││ +│ └─────────────────────────┘ │ etag ││ +│ └─────────────────────────┘│ +│ Stores: PipelineRun metadata Stores: TaskRun details │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + +**What is stored:** +- `results` table: PipelineRun metadata (name, status, timestamps, parameters) +- `records` table: TaskRun details (logs path, result values, step outputs) +- `data` column: Full YAML/JSON of the resource (stored as JSONB) + +**What is NOT stored:** +- Actual log content (stored in Loki/S3) +- Test output files (stored in PVC/S3) +- Container images + +--- + +### Useful Queries + +**View Tables and Schema** + +```sql +-- List all tables +\dt + +-- View table structure +\d results +\d records + +-- Check table sizes +SELECT relname, pg_size_pretty(pg_total_relation_size(relid)) +FROM pg_catalog.pg_statio_user_tables +ORDER BY pg_total_relation_size(relid) DESC; +``` + +**Query Results (PipelineRuns)** + +```sql +-- Recent PipelineRuns +SELECT name, namespace, type, + create_time, + data->>'status' as status +FROM results +WHERE namespace = 'osde2e-tekton' +ORDER BY create_time DESC +LIMIT 10; + +-- Search by name pattern +SELECT name, create_time +FROM results +WHERE 
name LIKE '%osde2e%' +ORDER BY create_time DESC; + +-- Count by namespace +SELECT namespace, COUNT(*) as count +FROM results +GROUP BY namespace +ORDER BY count DESC; + +-- Get full PipelineRun definition +SELECT data +FROM results +WHERE name = 'osde2e-tekton/results/your-pipelinerun-name' +\gx +``` + +**Query Records (TaskRuns)** + +```sql +-- List TaskRuns for a PipelineRun +SELECT r.name, r.type, r.create_time +FROM records r +WHERE r.result_name LIKE '%your-pipelinerun%' +ORDER BY r.create_time; + +-- Get TaskRun status +SELECT name, + data->'status'->'conditions'->0->>'status' as status, + data->'status'->'conditions'->0->>'reason' as reason +FROM records +WHERE name LIKE '%osde2e-test%' +ORDER BY create_time DESC +LIMIT 5; + +-- Get Task results (PASS/FAIL) +SELECT name, + data->'status'->'taskResults' as task_results +FROM records +WHERE name LIKE '%osde2e%' +ORDER BY create_time DESC +LIMIT 5; +``` + +**Extract Specific Data** + +```sql +-- Get test status from TaskRun results +SELECT name, + jsonb_path_query(data, '$.status.taskResults[*] ? (@.name == "test-status")') as test_status +FROM records +WHERE name LIKE '%osde2e-test%' +ORDER BY create_time DESC +LIMIT 10; + +-- Get pipeline parameters +SELECT name, + data->'spec'->'params' as params +FROM results +WHERE namespace = 'osde2e-tekton' +ORDER BY create_time DESC +LIMIT 5; + +-- Export to JSON file +\copy (SELECT data FROM results WHERE name LIKE '%osde2e%' LIMIT 1) TO '/tmp/result.json' +``` + +**Maintenance Queries** + +```sql +-- Check database size +SELECT pg_size_pretty(pg_database_size('tekton-results')); + +-- Count total records +SELECT + (SELECT COUNT(*) FROM results) as results_count, + (SELECT COUNT(*) FROM records) as records_count; + +-- Find old records (for cleanup planning) +SELECT DATE(create_time) as date, COUNT(*) +FROM results +GROUP BY DATE(create_time) +ORDER BY date DESC +LIMIT 30; +``` + +--- + +### Data Retention + +Tekton Results default retention: +- Results are kept for **90 days** by default +- Automatic pruning runs periodically +- Configure via TektonConfig: + +```yaml +spec: + result: + disabled: false + options: + deployments: + api: + args: + - "-retention_policies_enabled=true" + - "-retention_max_age=2160h" # 90 days +``` + +--- + +### Common Use Cases + +**1. Debug Failed PipelineRun** + +```sql +-- Find failed runs +SELECT name, create_time, + data->'status'->'conditions'->0->>'message' as error_message +FROM results +WHERE data->'status'->'conditions'->0->>'status' = 'False' + AND namespace = 'osde2e-tekton' +ORDER BY create_time DESC +LIMIT 5; +``` + +**2. Get Test Summary** + +```sql +-- Extract test summary from records +SELECT + name, + jsonb_path_query_first(data, '$.status.taskResults[*] ? (@.name == "test-summary").value') as summary +FROM records +WHERE name LIKE '%osde2e-test%' +ORDER BY create_time DESC +LIMIT 10; +``` + +**3. 
Export All Results for Analysis** + +```bash +# Export to CSV +oc exec tekton-results-postgres-0 -n openshift-pipelines -- \ + psql -U result -d tekton-results -c \ + "COPY (SELECT name, namespace, create_time FROM results WHERE namespace='osde2e-tekton') TO STDOUT WITH CSV HEADER" \ + > results.csv +``` + +--- + +## Troubleshooting + +### opc: Unable to Connect to Results API + +```bash +# Check if Results is enabled +oc get tektonconfig config -o jsonpath='{.spec.result.disabled}' +# Should be "false" + +# Check Results pods +oc get pods -n openshift-pipelines | grep tekton-results +# Should show: api, watcher, postgres all Running +``` + +### opc: No Results Found + +Results may not be recorded. Check annotations: +```bash +oc get pipelinerun -n osde2e-tekton -o yaml | grep "results.tekton.dev" +# Should have: +# results.tekton.dev/record: "true" +# results.tekton.dev/log: "true" +``` + +### API: 403 Forbidden + +Token may have expired: +```bash +# Get fresh token +TOKEN=$(oc whoami -t) + +# Or create service account token +TOKEN=$(oc create token tekton-results-reader -n openshift-pipelines --duration=1h) +``` + +### PostgreSQL: Connection Refused + +Pod may not be ready: +```bash +oc get pods -n openshift-pipelines | grep postgres +# Wait for Running state +``` + +--- + +## Method Comparison + +| Method | Best For | Historical Access | Ease of Use | +|--------|----------|-------------------|-------------| +| opc CLI | Daily use | Yes | Easy | +| oc logs | Live debugging | No | Easy | +| Results API | Automation | Yes | Medium | +| PostgreSQL | Deep debugging | Yes | Advanced | + +**Recommendation:** Use `opc` for most cases. Use API for automation. Use PostgreSQL only for debugging. diff --git a/test/e2e/doc/QUICK-REFERENCE.md b/test/e2e/doc/QUICK-REFERENCE.md new file mode 100644 index 0000000..e29e743 --- /dev/null +++ b/test/e2e/doc/QUICK-REFERENCE.md @@ -0,0 +1,124 @@ +# Quick Reference + +Command reference for OSDE2E Tekton Pipeline operations. 
+
+---
+
+## Setup
+
+```bash
+# Complete automated setup
+./setup-complete-tekton-stack.sh
+```
+
+---
+
+## Running Tests
+
+```bash
+# Interactive script (recommended)
+./run-with-credentials.sh
+
+# Manual template processing
+oc process -f e2e-tekton-template.yml \
+  -p CLUSTER_ID=<cluster-id> \
+  -p TEST_IMAGE=quay.io/your-org/image \
+  -p IMAGE_TAG=latest \
+  | oc apply -f - -n osde2e-tekton
+```
+
+---
+
+## Viewing Results
+
+```bash
+# List PipelineRuns
+oc get pipelinerun -n osde2e-tekton --sort-by=.metadata.creationTimestamp
+
+# View logs with script
+./view-pipeline-logs.sh
+./view-pipeline-logs.sh latest
+
+# Get S3 download URLs (after completion)
+oc logs <pipelinerun-name>-upload-results-to-s3-pod -n osde2e-tekton
+
+# View with opc CLI
+opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton
+```
+
+---
+
+## Diagnostics
+
+```bash
+# Check all components
+oc get tektonconfig config                                  # Tekton Pipelines
+oc get pods -n openshift-pipelines | grep tekton-results    # Tekton Results
+oc get csv -n openshift-operators | grep loki               # Loki Operator
+oc get lokistack osde2e-loki -n osde2e-tekton               # LokiStack
+oc get pods -n osde2e-tekton | grep loki                    # Loki Pods
+oc get clusterlogforwarder -n openshift-logging             # Log Forwarder
+```
+
+---
+
+## Common Fixes
+
+```bash
+# Fix: LokiStack pods Pending (reduce size)
+oc patch lokistack osde2e-loki -n osde2e-tekton \
+  --type=merge -p '{"spec":{"size":"1x.demo"}}'
+
+# Recreate stuck pods
+oc delete pod osde2e-loki-ingester-0 osde2e-loki-compactor-0 \
+  -n osde2e-tekton --force --grace-period=0
+
+# Recreate credentials secret
+oc delete secret osde2e-credentials -n osde2e-tekton
+oc create secret generic osde2e-credentials \
+  --from-literal=OCM_CLIENT_ID=xxx \
+  --from-literal=OCM_CLIENT_SECRET=xxx \
+  --from-literal=AWS_ACCESS_KEY_ID=xxx \
+  --from-literal=AWS_SECRET_ACCESS_KEY=xxx \
+  -n osde2e-tekton
+```
+
+---
+
+## Template Parameters
+
+| Parameter | Required | Default | Description |
+|-----------|----------|---------|-------------|
+| `OSDE2E_CONFIGS` | Yes | - | Configuration string (e.g., `rosa,sts,int,ad-hoc-image`) |
+| `TEST_IMAGE` | Yes | - | Test image to run |
+| `IMAGE_TAG` | Yes | - | Image tag |
+| `CLUSTER_ID` | No | - | Existing cluster ID |
+| `S3_RESULTS_BUCKET` | No | `osde2e-loki-logs` | S3 bucket for results |
+| `CLOUD_PROVIDER_REGION` | No | `us-east-1` | AWS region |
+
+---
+
+## Emergency Commands
+
+```bash
+# Stop all running PipelineRuns
+oc delete pipelinerun --all -n osde2e-tekton
+
+# Clean up old PVCs
+oc delete pvc -l app=osde2e -n osde2e-tekton
+
+# Restart Loki components
+oc rollout restart statefulset -n osde2e-tekton
+oc rollout restart deployment -n osde2e-tekton
+```
+
+---
+
+## Related Documentation
+
+| Document | Description |
+|----------|-------------|
+| [MANUAL-SETUP-GUIDE.md](./MANUAL-SETUP-GUIDE.md) | Complete manual setup |
+| [TROUBLESHOOTING.md](./TROUBLESHOOTING.md) | Common issues and solutions |
+| [STORAGE-GUIDE.md](./STORAGE-GUIDE.md) | Storage architecture |
+| [QUERY-RESULTS-GUIDE.md](./QUERY-RESULTS-GUIDE.md) | Results retrieval |
\ No newline at end of file
diff --git a/test/e2e/doc/README.md b/test/e2e/doc/README.md
new file mode 100644
index 0000000..4e5f219
--- /dev/null
+++ b/test/e2e/doc/README.md
@@ -0,0 +1,218 @@
+# OSDE2E Tekton Pipeline Documentation
+
+Complete documentation for the Tekton-based E2E testing infrastructure with Tekton Results and LokiStack integration.
+
+---
+
+## Quick Navigation
+
+| Goal | Document |
+|------|----------|
+| **Complete manual setup** | [MANUAL-SETUP-GUIDE.md](./MANUAL-SETUP-GUIDE.md) |
+| **View required config files** | [REQUIRED-CONFIG-FILES.md](./REQUIRED-CONFIG-FILES.md) |
+| **Troubleshoot issues** | [TROUBLESHOOTING.md](./TROUBLESHOOTING.md) |
+| **Command reference** | [QUICK-REFERENCE.md](./QUICK-REFERENCE.md) |
+| **Retrieve test results** | [QUERY-RESULTS-GUIDE.md](./QUERY-RESULTS-GUIDE.md) |
+| **Storage architecture** | [STORAGE-GUIDE.md](./STORAGE-GUIDE.md) |
+
+---
+
+## Architecture Overview
+
+```
+┌─────────────────────────────────────────────────────────┐
+│                   Tekton PipelineRun                    │
+│   (Annotations: results.tekton.dev/record: "true")      │
+└────────────┬─────────────────────────────┬──────────────┘
+             │                             │
+             ▼                             ▼
+    ┌────────────────┐          ┌──────────────────┐
+    │ Tekton Results │          │     Pod Logs     │
+    │   (Metadata)   │          │ (stdout/stderr)  │
+    └────────┬───────┘          └────────┬─────────┘
+             │                           │
+             ▼                           ▼
+    ┌────────────────┐          ┌──────────────────┐
+    │   PostgreSQL   │          │  ClusterLogFwd   │
+    │   (Records)    │          │  (Filter+Route)  │
+    └────────────────┘          └────────┬─────────┘
+                                         │
+                                         ▼
+                              ┌──────────────────────┐
+                              │      LokiStack       │
+                              │   (Ingester → S3)    │
+                              └──────────┬───────────┘
+                                         │
+                                         ▼
+                              ┌──────────────────────┐
+                              │        AWS S3        │
+                              │   (Long-term logs)   │
+                              └──────────────────────┘
+```
+
+---
+
+## Quick Start
+
+### Automated Setup (Recommended)
+```bash
+cd test/e2e
+./setup-complete-tekton-stack.sh
+```
+
+This installs:
+1. OpenShift Pipelines Operator
+2. Tekton Results (PostgreSQL)
+3. Loki Operator + LokiStack (S3)
+4. ClusterLogForwarder
+5. All Tekton resources
+
+### Run Tests
+```bash
+./run-with-credentials.sh
+```
+
+### View Results
+```bash
+# Get S3 download URLs
+oc logs <pipelinerun-name>-upload-results-to-s3-pod -n osde2e-tekton
+
+# Query Tekton Results API
+./tekton-results-api.sh query
+
+# View logs with opc CLI
+opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton
+```
+
+---
+
+## Document Overview
+
+### Setup Guides
+
+| Document | Description |
+|----------|-------------|
+| [MANUAL-SETUP-GUIDE.md](./MANUAL-SETUP-GUIDE.md) | Complete step-by-step CLI guide |
+| [REQUIRED-CONFIG-FILES.md](./REQUIRED-CONFIG-FILES.md) | YAML configuration files explained |
+
+### Operations
+
+| Document | Description |
+|----------|-------------|
+| [QUICK-REFERENCE.md](./QUICK-REFERENCE.md) | Command cheat sheet |
+| [TROUBLESHOOTING.md](./TROUBLESHOOTING.md) | Common issues and solutions |
+
+### Data Access
+
+| Document | Description |
+|----------|-------------|
+| [STORAGE-GUIDE.md](./STORAGE-GUIDE.md) | Storage architecture and S3 |
+| [QUERY-RESULTS-GUIDE.md](./QUERY-RESULTS-GUIDE.md) | Retrieve logs and results |
+
+---
+
+## Key Configuration Notes
+
+These are common sources of errors during setup:
+
+### Tekton Results Field Name
+```yaml
+# Correct
+spec.result.disabled: false
+
+# Incorrect (will not work)
+spec.results.enabled: true
+```
+
+### Loki Operator Channel
+```yaml
+# Correct - use versioned channel
+channel: stable-6.4
+
+# Incorrect - generic channel does not exist
+channel: stable
+```
+
+### LokiStack Sizing
+Size based on **single node** resources, not total cluster:
+- Nodes < 4 CPU: `1x.demo`
+- Nodes 4-6 CPU: `1x.extra-small`
+- Nodes >= 7 CPU: `1x.small`
+
+---
+
+## Directory Structure
+
+```
+test/e2e/
+├── Scripts
+│   ├── setup-complete-tekton-stack.sh   # Complete setup
+│   ├── run-with-credentials.sh          # Run tests
+│   ├── tekton-results-api.sh            # Query Results API
+│   └── view-pipeline-logs.sh            # View logs
+│
+├── Tekton Resources
+│   ├── osde2e-tekton-task.yml           # 
Main Task +│ ├── osde2e-pipeline.yml # Pipeline +│ ├── upload-to-s3-task.yml # S3 upload Task +│ └── e2e-tekton-template.yml # OpenShift Template +│ +├── Configuration +│ ├── ClusterLogForwarder.yaml # Log forwarding +│ ├── tekton-results-reader.yaml # RBAC for Results API +│ └── loki-s3-policy.json # S3 IAM policy +│ +└── doc/ # Documentation +``` + +--- + +## Prerequisites + +- OpenShift cluster (4.12+) +- `oc` CLI with cluster-admin access +- AWS account (for S3 storage) +- OCM credentials (for OSDE2E tests) + +--- + +## App-Interface Integration + +The `e2e-tekton-template.yml` can be referenced by app-interface: + +```yaml +resourceTemplates: +- name: saas-oeo-e2e-test + url: https://github.com/openshift/osd-example-operator + path: /test/e2e/e2e-tekton-template.yml + parameters: + IMAGE_TAG: latest + OSDE2E_CONFIGS: rosa,sts,int + CLUSTER_ID: ${CLUSTER_ID} + +managedResourceTypes: +- PipelineRun.tekton.dev +- ServiceAccount +- Role.rbac.authorization.k8s.io +- RoleBinding.rbac.authorization.k8s.io +- PersistentVolumeClaim +``` + +--- + +## Data Flow Timeline + +| Time | Event | +|------|-------| +| T+0 | PipelineRun completes | +| T+0 | Results uploaded to S3 (pre-signed URLs available) | +| T+5-10 min | Logs available in Loki | +| T+30 min | Logs flushed to S3 (Loki chunks) | + +--- + +## References + +- [OpenShift Pipelines Documentation](https://docs.openshift.com/pipelines/) +- [Tekton Results Documentation](https://docs.redhat.com/en/documentation/red_hat_openshift_pipelines/) +- [Loki Operator Documentation](https://docs.openshift.com/container-platform/latest/logging/cluster-logging-loki.html) diff --git a/test/e2e/doc/REQUIRED-CONFIG-FILES.md b/test/e2e/doc/REQUIRED-CONFIG-FILES.md new file mode 100644 index 0000000..c076880 --- /dev/null +++ b/test/e2e/doc/REQUIRED-CONFIG-FILES.md @@ -0,0 +1,657 @@ +# Required Configuration Files + +Complete list of configuration files needed for manual OSDE2E Tekton Pipeline setup. + +--- + +## File Overview + +| File | Purpose | Required | When to Apply | +|------|---------|----------|---------------| +| `osde2e-tekton-task.yml` | Main test Task definition | ✅ Yes | Step 8 | +| `upload-to-s3-task.yml` | S3 upload Task | ✅ Yes | Step 8 | +| `osde2e-pipeline.yml` | Pipeline orchestration | ✅ Yes | Step 8 | +| `e2e-tekton-template.yml` | OpenShift Template for running tests | ✅ Yes | Step 10 | +| `ClusterLogForwarder.yaml` | Log forwarding to Loki | Optional | Step 7 | +| `loki-s3-policy.json` | IAM policy reference | Reference | Step 5 | + +--- + +## File 1: osde2e-tekton-task.yml + +**Purpose:** Defines the main test Task that runs OSDE2E tests. + +**Key Features:** +- Runs test container with configurable parameters +- Captures JUnit XML results +- Stores logs in workspace PVC +- Produces structured results for Tekton Results + +**Apply Command:** +```bash +oc apply -f osde2e-tekton-task.yml -n osde2e-tekton +``` + +
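+
+Once applied, a quick sanity check that the Task registered with the expected params and results (standard `oc` commands, nothing project-specific):
+
+```bash
+# Confirm the Task exists
+oc get task osde2e-test-task -n osde2e-tekton
+
+# Inspect its declared params, results, and workspaces
+oc describe task osde2e-test-task -n osde2e-tekton
+```
+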
+<details>
+<summary>📄 Click to view full file content</summary>
+
+```yaml
+apiVersion: tekton.dev/v1
+kind: Task
+metadata:
+  name: osde2e-test-task
+  labels:
+    app.kubernetes.io/version: "0.1"
+  annotations:
+    tekton.dev/pipelines.minVersion: "0.17.0"
+    tekton.dev/categories: Testing
+    tekton.dev/tags: osde2e,testing,e2e
+    tekton.dev/displayName: "OSDE2E Test Task"
+spec:
+  description: >-
+    Runs osde2e tests and collects results for Tekton Results observability.
+
+  params:
+    - name: OSDE2E_CONFIGS
+      type: string
+      description: Configuration string for osde2e (e.g., "rosa,sts,int,ad-hoc-image")
+    - name: TEST_IMAGE
+      type: string
+      description: The test image to run
+    - name: IMAGE_TAG
+      type: string
+      default: "latest"
+    - name: OCM_CLIENT_ID
+      type: string
+      default: ""
+    - name: OCM_CLIENT_SECRET
+      type: string
+      default: ""
+    - name: AWS_ACCESS_KEY_ID
+      type: string
+      default: ""
+    - name: AWS_SECRET_ACCESS_KEY
+      type: string
+      default: ""
+    - name: CLOUD_PROVIDER_REGION
+      type: string
+      default: "us-east-1"
+    - name: LOG_BUCKET
+      type: string
+      default: "osde2e-logs"
+    - name: USE_EXISTING_CLUSTER
+      type: string
+      default: "TRUE"
+    - name: CLUSTER_ID
+      type: string
+      default: ""
+    - name: CAD_PAGERDUTY_ROUTING_KEY
+      type: string
+      default: ""
+
+  results:
+    - name: test-results
+      description: JUnit XML test results
+    - name: test-logs
+      description: Test execution logs
+    - name: test-status
+      description: Overall test status (PASS/FAIL)
+    - name: test-summary
+      description: Test execution summary
+
+  workspaces:
+    - name: test-results
+      description: Workspace for storing test results and logs
+      mountPath: /workspace/test-results
+
+  steps:
+    - name: setup-test-environment
+      image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest
+      env:
+        - name: HOME
+          value: /tekton/home
+      script: |
+        #!/bin/bash
+        set -euo pipefail
+        mkdir -p $(workspaces.test-results.path)/junit
+        mkdir -p $(workspaces.test-results.path)/logs
+        mkdir -p $(workspaces.test-results.path)/reports
+        mkdir -p $(workspaces.test-results.path)/shared
+        echo "Workspace directories created"
+
+    - name: run-osde2e-tests
+      image: $(params.TEST_IMAGE):$(params.IMAGE_TAG)
+      env:
+        - name: OSDE2E_CONFIGS
+          value: $(params.OSDE2E_CONFIGS)
+        - name: OCM_CLIENT_ID
+          value: $(params.OCM_CLIENT_ID)
+        - name: OCM_CLIENT_SECRET
+          value: $(params.OCM_CLIENT_SECRET)
+        - name: AWS_ACCESS_KEY_ID
+          value: $(params.AWS_ACCESS_KEY_ID)
+        - name: AWS_SECRET_ACCESS_KEY
+          value: $(params.AWS_SECRET_ACCESS_KEY)
+        - name: CLOUD_PROVIDER_REGION
+          value: $(params.CLOUD_PROVIDER_REGION)
+        - name: LOG_BUCKET
+          value: $(params.LOG_BUCKET)
+        - name: USE_EXISTING_CLUSTER
+          value: $(params.USE_EXISTING_CLUSTER)
+        - name: CLUSTER_ID
+          value: $(params.CLUSTER_ID)
+        - name: REPORT_DIR
+          value: $(workspaces.test-results.path)/reports
+        - name: JUNIT_REPORT_DIR
+          value: $(workspaces.test-results.path)/junit
+      script: |
+        #!/bin/bash
+        set -euo pipefail
+
+        # Run tests; capture the exit code with '|| TEST_EXIT_CODE=$?' so a
+        # failing test does not abort the script under 'set -e' before the
+        # status can be recorded
+        TEST_EXIT_CODE=0
+        /osde2e test --configs $OSDE2E_CONFIGS 2>&1 | tee $(workspaces.test-results.path)/logs/osde2e-full.log || TEST_EXIT_CODE=$?
+
+        # Determine test status
+        if [ $TEST_EXIT_CODE -eq 0 ]; then
+          echo "PASS" > /tmp/test-status.txt
+        else
+          echo "FAIL" > /tmp/test-status.txt
+        fi
+```
+
+</details>
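+
+One detail worth calling out from the test step above: because the script runs under `set -euo pipefail`, reading `$?` after a failed pipeline would never execute, so the exit code is captured with `|| TEST_EXIT_CODE=$?` instead. A minimal standalone illustration of the pattern:
+
+```bash
+#!/bin/bash
+set -euo pipefail
+
+# 'false | tee' fails under pipefail; '|| rc=$?' records the status
+# without triggering 'set -e', so the script continues.
+rc=0
+false | tee /tmp/out.log || rc=$?
+echo "captured exit code: $rc"   # prints 1, and we are still running
+```
+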
+
+---
+
+## File 2: upload-to-s3-task.yml
+
+**Purpose:** Uploads test results to S3 for long-term storage and generates pre-signed URLs.
+
+**Key Features:**
+- Uploads all files from workspace to S3
+- Organizes by date: `test-results/YYYY-MM-DD/<pipeline-run>-<timestamp>/`
+- Generates 7-day pre-signed URLs for browser access
+
+**Apply Command:**
+```bash
+oc apply -f upload-to-s3-task.yml -n osde2e-tekton
+```
+
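+For reference, this is the kind of layout the task produces, one dated prefix per run (bucket and run names here are illustrative):
+
+```bash
+aws s3 ls s3://osde2e-loki-logs/test-results/2025-12-03/
+#   PRE osde2e-osd-example-operator-latest-ab12cd3-20251203-141530/
+```
+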
+<details>
+<summary>📄 Click to view full file content</summary>
+
+```yaml
+apiVersion: tekton.dev/v1
+kind: Task
+metadata:
+  name: upload-to-s3-task
+  labels:
+    app.kubernetes.io/version: "0.1"
+spec:
+  description: >-
+    Upload test results to S3 bucket for long-term storage.
+    Generates pre-signed URLs for easy browser access.
+
+  params:
+    - name: S3_BUCKET
+      type: string
+      default: "osde2e-loki-logs"
+    - name: PIPELINE_RUN_NAME
+      type: string
+    - name: AWS_REGION
+      type: string
+      default: "us-east-1"
+    - name: TEST_STATUS
+      type: string
+      default: "UNKNOWN"
+    - name: OSDE2E_CONFIGS
+      type: string
+      default: ""
+
+  workspaces:
+    - name: test-results
+      mountPath: /workspace/test-results
+
+  results:
+    - name: s3-path
+      description: S3 path where results are stored
+    - name: upload-status
+      description: Upload status (SUCCESS/FAILED)
+
+  steps:
+    - name: upload-to-s3
+      image: amazon/aws-cli:2.15.0
+      env:
+        - name: AWS_ACCESS_KEY_ID
+          valueFrom:
+            secretKeyRef:
+              name: loki-s3-credentials
+              key: access_key_id
+        - name: AWS_SECRET_ACCESS_KEY
+          valueFrom:
+            secretKeyRef:
+              name: loki-s3-credentials
+              key: access_key_secret
+        - name: AWS_DEFAULT_REGION
+          value: "$(params.AWS_REGION)"
+      script: |
+        #!/bin/bash
+        set -euo pipefail
+
+        S3_BUCKET="$(params.S3_BUCKET)"
+        PIPELINE_RUN="$(params.PIPELINE_RUN_NAME)"
+        DATE_PREFIX=$(date +%Y-%m-%d)
+        TIMESTAMP=$(date +%Y%m%d-%H%M%S)
+        S3_PREFIX="test-results/${DATE_PREFIX}/${PIPELINE_RUN}-${TIMESTAMP}"
+
+        echo "Uploading to s3://${S3_BUCKET}/${S3_PREFIX}/"
+
+        # Upload all files
+        aws s3 cp /workspace/test-results/ "s3://${S3_BUCKET}/${S3_PREFIX}/" --recursive
+
+        # Generate pre-signed URLs (valid 7 days = 604800 seconds)
+        echo "Pre-signed URLs (valid 7 days):"
+        aws s3 presign "s3://${S3_BUCKET}/${S3_PREFIX}/logs/osde2e-full.log" --expires-in 604800 || true
+
+        echo -n "s3://${S3_BUCKET}/${S3_PREFIX}/" > $(results.s3-path.path)
+        echo -n "SUCCESS" > $(results.upload-status.path)
+```
+
+</details>
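+
+The step above presigns only the main log file. If you want a shareable URL for every uploaded object, a small extension of the same step works (sketch; reuses the `S3_BUCKET`/`S3_PREFIX` variables defined in the script):
+
+```bash
+# Presign every object under this run's prefix (7-day expiry)
+aws s3 ls "s3://${S3_BUCKET}/${S3_PREFIX}/" --recursive \
+  | awk '{print $4}' \
+  | while read -r key; do
+      echo "${key}:"
+      aws s3 presign "s3://${S3_BUCKET}/${key}" --expires-in 604800
+    done
+```
+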
+ +--- + +## File 3: osde2e-pipeline.yml + +**Purpose:** Orchestrates the test Task and S3 upload Task. + +**Key Features:** +- Runs main test Task +- Automatically uploads results to S3 in `finally` section +- Passes test status to S3 upload Task + +**Apply Command:** +```bash +oc apply -f osde2e-pipeline.yml -n osde2e-tekton +``` + +
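+Because the upload runs in `finally`, it executes whether the test task passes or fails. After a run completes, the pipeline-level status can be read straight off the PipelineRun (Tekton v1 surfaces pipeline results under `status.results`):
+
+```bash
+oc get pipelinerun <pipelinerun-name> -n osde2e-tekton \
+  -o jsonpath='{.status.results[?(@.name=="final-test-status")].value}'
+```
+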
+<details>
+<summary>📄 Click to view full file content</summary>
+
+```yaml
+apiVersion: tekton.dev/v1
+kind: Pipeline
+metadata:
+  name: osde2e-test-pipeline
+spec:
+  description: >-
+    Orchestrates osde2e testing with S3 result upload.
+
+  params:
+    - name: OSDE2E_CONFIGS
+      type: string
+    - name: TEST_IMAGE
+      type: string
+    - name: IMAGE_TAG
+      type: string
+      default: "latest"
+    - name: OCM_CLIENT_ID
+      type: string
+      default: ""
+    - name: OCM_CLIENT_SECRET
+      type: string
+      default: ""
+    - name: AWS_ACCESS_KEY_ID
+      type: string
+      default: ""
+    - name: AWS_SECRET_ACCESS_KEY
+      type: string
+      default: ""
+    - name: CLOUD_PROVIDER_REGION
+      type: string
+      default: "us-east-1"
+    - name: LOG_BUCKET
+      type: string
+      default: "osde2e-logs"
+    - name: USE_EXISTING_CLUSTER
+      type: string
+      default: "TRUE"
+    - name: CLUSTER_ID
+      type: string
+      default: ""
+    - name: CAD_PAGERDUTY_ROUTING_KEY
+      type: string
+      default: ""
+    - name: S3_RESULTS_BUCKET
+      type: string
+      default: "osde2e-loki-logs"
+
+  workspaces:
+    - name: test-workspace
+
+  results:
+    - name: final-test-status
+      value: $(tasks.osde2e-test.results.test-status)
+
+  tasks:
+    - name: osde2e-test
+      taskRef:
+        name: osde2e-test-task
+      params:
+        - name: OSDE2E_CONFIGS
+          value: $(params.OSDE2E_CONFIGS)
+        - name: TEST_IMAGE
+          value: $(params.TEST_IMAGE)
+        - name: IMAGE_TAG
+          value: $(params.IMAGE_TAG)
+        # ... other params passed through
+      workspaces:
+        - name: test-results
+          workspace: test-workspace
+
+  finally:
+    - name: upload-results-to-s3
+      taskRef:
+        name: upload-to-s3-task
+      params:
+        - name: S3_BUCKET
+          value: $(params.S3_RESULTS_BUCKET)
+        - name: PIPELINE_RUN_NAME
+          value: $(context.pipelineRun.name)
+        - name: AWS_REGION
+          value: $(params.CLOUD_PROVIDER_REGION)
+        - name: TEST_STATUS
+          value: $(tasks.osde2e-test.results.test-status)
+      workspaces:
+        - name: test-results
+          workspace: test-workspace
+```
+
+</details>
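+
+For one-off runs without the Template, the Pipeline can also be started directly; `opc` embeds the `tkn` CLI, so `opc pipeline start` accepts the usual flags (sketch; the image and the PVC manifest `workspace-pvc.yaml` are placeholders you would supply):
+
+```bash
+opc pipeline start osde2e-test-pipeline -n osde2e-tekton \
+  --param OSDE2E_CONFIGS="rosa,sts,int,ad-hoc-image" \
+  --param TEST_IMAGE="quay.io/your-org/your-e2e-image" \
+  --param IMAGE_TAG="latest" \
+  --workspace name=test-workspace,volumeClaimTemplateFile=workspace-pvc.yaml \
+  --showlog
+```
+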
+ +--- + +## File 4: e2e-tekton-template.yml + +**Purpose:** OpenShift Template for easily creating PipelineRuns. + +**Key Features:** +- Creates PVC for test workspace +- Creates PipelineRun with all parameters +- Auto-generates unique JOBID +- Sets timeouts (3 hours total) + +**Usage:** +```bash +oc process -f e2e-tekton-template.yml \ + -p OSDE2E_CONFIGS="rosa,sts,int,ad-hoc-image" \ + -p TEST_IMAGE="quay.io/redhat-services-prod/oeo-cicada-tenant/osd-example-operator-e2e" \ + -p IMAGE_TAG="latest" \ + -p CLUSTER_ID="your-cluster-id" \ + | oc apply -f - -n osde2e-tekton +``` + +
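+Two handy variations while iterating on parameters (standard `oc process` flags):
+
+```bash
+# List the template's parameters without creating anything
+oc process --parameters -f e2e-tekton-template.yml
+
+# Render the objects locally to review the generated PipelineRun first
+oc process -f e2e-tekton-template.yml \
+  -p OSDE2E_CONFIGS="rosa,sts,int,ad-hoc-image" \
+  -p TEST_IMAGE="quay.io/redhat-services-prod/oeo-cicada-tenant/osd-example-operator-e2e" \
+  -p IMAGE_TAG="latest" -o yaml | less
+```
+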
+<details>
+<summary>📄 Click to view full file content</summary>
+
+```yaml
+apiVersion: template.openshift.io/v1
+kind: Template
+metadata:
+  name: osde2e-focused-tests-tekton
+  labels:
+    app: osde2e
+    component: testing
+parameters:
+  - name: OSDE2E_CONFIGS
+    displayName: "OSDE2E Configurations"
+    required: true
+  - name: TEST_IMAGE
+    displayName: "Test Image"
+    required: true
+  - name: IMAGE_TAG
+    displayName: "Image Tag"
+    required: true
+  - name: CLUSTER_ID
+    displayName: "Cluster ID"
+    required: false
+    value: ''
+  - name: OCM_CLIENT_ID
+    required: false
+  - name: OCM_CLIENT_SECRET
+    required: false
+  - name: AWS_ACCESS_KEY_ID
+    required: false
+  - name: AWS_SECRET_ACCESS_KEY
+    required: false
+  - name: CLOUD_PROVIDER_REGION
+    value: "us-east-1"
+  - name: JOBID
+    generate: expression
+    from: "[0-9a-z]{7}"
+
+objects:
+  # PVC for workspace
+  - apiVersion: v1
+    kind: PersistentVolumeClaim
+    metadata:
+      name: osde2e-test-workspace-${JOBID}
+      labels:
+        app: osde2e
+        job-id: ${JOBID}
+    spec:
+      accessModes:
+        - ReadWriteOnce
+      resources:
+        requests:
+          storage: 2Gi
+      storageClassName: gp3-csi
+
+  # PipelineRun
+  - apiVersion: tekton.dev/v1
+    kind: PipelineRun
+    metadata:
+      name: osde2e-osd-example-operator-${IMAGE_TAG}-${JOBID}
+      labels:
+        app: osde2e
+        job-id: ${JOBID}
+        app.kubernetes.io/managed-by: tekton-pipelines
+      annotations:
+        results.tekton.dev/record: "true"
+        results.tekton.dev/log: "true"
+    spec:
+      serviceAccountName: pipeline
+      pipelineRef:
+        name: osde2e-test-pipeline
+      params:
+        - name: OSDE2E_CONFIGS
+          value: ${OSDE2E_CONFIGS}
+        - name: TEST_IMAGE
+          value: ${TEST_IMAGE}
+        - name: IMAGE_TAG
+          value: ${IMAGE_TAG}
+        - name: CLUSTER_ID
+          value: ${CLUSTER_ID}
+        # ... other params
+      workspaces:
+        - name: test-workspace
+          persistentVolumeClaim:
+            claimName: osde2e-test-workspace-${JOBID}
+      timeouts:
+        pipeline: "3h0m0s"
+        tasks: "2h45m0s"
+        finally: "15m0s"
+```
+
+</details>
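+
+Since both objects are stamped with the generated `job-id` label, cleaning up a single run is one command:
+
+```bash
+# Remove the PipelineRun and its workspace PVC for one job
+oc delete pipelinerun,pvc -l job-id=<jobid> -n osde2e-tekton
+```
+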
+ +--- + +## File 5: ClusterLogForwarder.yaml (Optional) + +**Purpose:** Forwards Tekton pod logs to LokiStack for real-time log aggregation. + +**When to Use:** Only if you've installed Loki and want real-time log forwarding. + +**Apply Command:** +```bash +oc apply -f ClusterLogForwarder.yaml +``` + +
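+After applying, confirm the forwarder was accepted and the collector pods come up (the collector check mirrors the one used in TROUBLESHOOTING.md):
+
+```bash
+oc get clusterlogforwarder tekton-to-loki -n openshift-logging
+oc get pods -n openshift-logging | grep collector
+```
+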
+<details>
+<summary>📄 Click to view full file content</summary>
+
+```yaml
+apiVersion: observability.openshift.io/v1
+kind: ClusterLogForwarder
+metadata:
+  name: tekton-to-loki
+  namespace: openshift-logging
+spec:
+  managementState: Managed
+  serviceAccount:
+    name: collector
+
+  inputs:
+    - name: tekton-logs
+      type: application
+      application:
+        selector:
+          matchExpressions:
+            - key: app.kubernetes.io/managed-by
+              operator: In
+              values:
+                - tekton-pipelines
+                - pipelinesascode.tekton.dev
+        namespaces:
+          - osde2e-tekton
+
+  outputs:
+    - name: loki-output
+      type: lokiStack
+      lokiStack:
+        target:
+          name: osde2e-loki
+          namespace: osde2e-tekton
+        authentication:
+          token:
+            from: serviceAccount
+      tls:
+        ca:
+          key: service-ca.crt
+          configMapName: openshift-service-ca.crt
+
+  pipelines:
+    - name: tekton-to-loki-pipeline
+      inputRefs:
+        - tekton-logs
+      outputRefs:
+        - loki-output
+```
+
+</details>
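+
+The checks above only confirm the forwarder objects exist. To see log lines actually land in Loki, a spot-check against the LokiStack gateway works (sketch; the `osde2e-loki-gateway-http` service name and the `application` tenant path follow LokiStack conventions and are assumptions, not something this repo pins down):
+
+```bash
+# Port-forward the LokiStack gateway
+oc port-forward svc/osde2e-loki-gateway-http 8080:8080 -n osde2e-tekton &
+
+# Query the 'application' tenant for recent osde2e-tekton logs
+curl -skG -H "Authorization: Bearer $(oc whoami -t)" \
+  "https://localhost:8080/api/logs/v1/application/loki/api/v1/query_range" \
+  --data-urlencode 'query={kubernetes_namespace_name="osde2e-tekton"}' \
+  --data-urlencode 'limit=5' | jq '.data.result | length'
+```
+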
+ +--- + +## File 6: loki-s3-policy.json (Reference) + +**Purpose:** IAM policy for S3 access. Use as reference when creating IAM user. + +**Usage:** +```bash +aws iam put-user-policy \ + --user-name YOUR_IAM_USER \ + --policy-name LokiS3Access \ + --policy-document file://loki-s3-policy.json +``` + +
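+To prove the policy actually took effect, read it back and do a write/delete round-trip against the bucket (stdin upload via `aws s3 cp -` is standard AWS CLI):
+
+```bash
+# Read back the attached inline policy
+aws iam get-user-policy --user-name YOUR_IAM_USER --policy-name LokiS3Access
+
+# Round-trip test: upload from stdin, then clean up
+echo ok | aws s3 cp - s3://osde2e-loki-logs/healthcheck.txt
+aws s3 rm s3://osde2e-loki-logs/healthcheck.txt
+```
+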
+<details>
+<summary>📄 Click to view full file content</summary>
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket",
+        "s3:GetBucketLocation"
+      ],
+      "Resource": "arn:aws:s3:::osde2e-loki-logs"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:DeleteObject"
+      ],
+      "Resource": "arn:aws:s3:::osde2e-loki-logs/*"
+    }
+  ]
+}
+```
+
+</details>
+ +--- + +## Quick Apply All Required Files + +```bash +cd /path/to/osd-example-operator/test/e2e + +# Apply Tasks +oc apply -f osde2e-tekton-task.yml -n osde2e-tekton +oc apply -f upload-to-s3-task.yml -n osde2e-tekton + +# Apply Pipeline +oc apply -f osde2e-pipeline.yml -n osde2e-tekton + +# Verify +oc get task,pipeline -n osde2e-tekton +``` + +--- + +## Secrets Required + +### 1. S3 Credentials (for upload-to-s3-task) + +```bash +oc create secret generic loki-s3-credentials \ + --from-literal=access_key_id="AKIA..." \ + --from-literal=access_key_secret="your-secret" \ + --from-literal=bucketnames="osde2e-loki-logs" \ + --from-literal=endpoint="https://s3.us-east-1.amazonaws.com" \ + --from-literal=region="us-east-1" \ + -n osde2e-tekton +``` + +### 2. Test Credentials (for tests) + +```bash +oc create secret generic osde2e-credentials \ + --from-literal=OCM_CLIENT_ID="cloud-services" \ + --from-literal=OCM_CLIENT_SECRET="your-ocm-token" \ + --from-literal=AWS_ACCESS_KEY_ID="AKIA..." \ + --from-literal=AWS_SECRET_ACCESS_KEY="your-aws-secret" \ + -n osde2e-tekton +``` + +--- + +## Verification + +```bash +# Check all resources applied +oc get task -n osde2e-tekton +# Expected: osde2e-test-task, upload-to-s3-task + +oc get pipeline -n osde2e-tekton +# Expected: osde2e-test-pipeline + +oc get secret -n osde2e-tekton | grep -E "loki-s3|osde2e-credentials" +# Expected: loki-s3-credentials, osde2e-credentials +``` diff --git a/test/e2e/doc/STORAGE-GUIDE.md b/test/e2e/doc/STORAGE-GUIDE.md new file mode 100644 index 0000000..a19a92c --- /dev/null +++ b/test/e2e/doc/STORAGE-GUIDE.md @@ -0,0 +1,346 @@ +# Storage Guide + +Guide to test result storage: where data is stored, how to access it, and S3 configuration. + +--- + +## Overview: Where Are Test Results Stored? 
+
+| Data Type | Location | Retention | Access Method |
+|-----------|----------|-----------|---------------|
+| Test logs (`osde2e-full.log`) | S3 | 30+ days | Pre-signed URLs |
+| JUnit XML reports | S3 | 30+ days | Pre-signed URLs |
+| Pod stdout/stderr | Loki → S3 | 30 days | `opc` CLI |
+| Run metadata | PostgreSQL | 90 days | Results API |
+
+---
+
+## Storage Architecture
+
+```
+┌─────────────────────────────────────────────────────────────┐
+│                     Tekton PipelineRun                      │
+└─────────────────────────────────────────────────────────────┘
+         │
+         ┌─────────────────┼─────────────────┐
+         │                 │                 │
+         ▼                 ▼                 ▼
+┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
+│  Workspace PVC  │ │    Pod Logs     │ │ Tekton Results  │
+│  (test files)   │ │    (stdout)     │ │   (metadata)    │
+└────────┬────────┘ └────────┬────────┘ └────────┬────────┘
+         │                   │                   │
+         ▼                   ▼                   ▼
+┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
+│   S3 Bucket     │ │    LokiStack    │ │   PostgreSQL    │
+│ (test-results/) │ │  (→ S3 chunks)  │ │   (internal)    │
+└─────────────────┘ └─────────────────┘ └─────────────────┘
+```
+
+---
+
+## S3 Setup Guide
+
+### Step 1: Create S3 Bucket
+
+```bash
+aws s3 mb s3://osde2e-loki-logs --region us-east-1
+```
+
+### Step 2: Create IAM Policy
+
+Save the following to `loki-s3-policy.json`:
+
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket",
+        "s3:GetBucketLocation"
+      ],
+      "Resource": "arn:aws:s3:::osde2e-loki-logs"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:DeleteObject"
+      ],
+      "Resource": "arn:aws:s3:::osde2e-loki-logs/*"
+    }
+  ]
+}
+```
+
+Apply to IAM user:
+
+```bash
+aws iam put-user-policy \
+  --user-name loki-storage-user \
+  --policy-name LokiS3Access \
+  --policy-document file://loki-s3-policy.json
+```
+
+### Step 3: Create Kubernetes Secret
+
+```bash
+oc create secret generic loki-s3-credentials \
+  --from-literal=access_key_id="AKIA..." \
+  --from-literal=access_key_secret="your-secret" \
+  --from-literal=bucketnames="osde2e-loki-logs" \
+  --from-literal=endpoint="https://s3.us-east-1.amazonaws.com" \
+  --from-literal=region="us-east-1" \
+  -n osde2e-tekton
+```
+
+### Step 4: Verify S3 Access
+
+```bash
+aws s3 ls s3://osde2e-loki-logs/ --region us-east-1
+```
+
+---
+
+## S3 Troubleshooting
+
+### Issue 1: AccessDenied - Wrong AWS Account
+
+**Error:**
+```
+AccessDenied: User: arn:aws:iam::ACCOUNT_A:user/xxx is not authorized
+to perform: s3:PutObject on resource: "arn:aws:s3:::osde2e-loki-logs"
+```
+
+**Cause:** The IAM user is in a different AWS account than the S3 bucket.
+
+**Solution:**
+- Verify which AWS account owns the bucket
+- Use an IAM user from the same account
+- Check Access Key ID matches the correct user
+
+### Issue 2: IAM Policy References Wrong Bucket
+
+**Error:**
+```
+Policy configured with bucket: old-bucket-name
+```
+
+**Cause:** The IAM policy has an outdated bucket name.
+
+**Solution:**
+```bash
+# Update policy with correct bucket
+aws iam put-user-policy \
+  --user-name loki-storage-user \
+  --policy-name LokiS3Access \
+  --policy-document file://loki-s3-policy.json
+
+# Verify
+aws iam get-user-policy --user-name loki-storage-user --policy-name LokiS3Access
+```
+
+### Issue 3: S3 URLs Return "Access Denied"
+
+**Error:**
+```xml
+<Error><Code>AccessDenied</Code><Message>Access Denied</Message></Error>
+```
+
+**Cause:** S3 buckets are private. Direct object URLs don't include authentication.
+
+**Solution:** Use pre-signed URLs (valid 7 days):
+```bash
+# Get from upload task logs
+oc logs <pipelinerun-name>-upload-results-to-s3-pod -n osde2e-tekton
+
+# Or generate manually
+aws s3 presign s3://osde2e-loki-logs/path/to/file --expires-in 604800
+```
+
+### Issue 4: Missing S3 Permissions
+
+**Cause:** Policy missing required actions.
+
+**Solution:** Ensure policy includes all 5 actions:
+- `s3:PutObject` - Upload files
+- `s3:GetObject` - Download files
+- `s3:DeleteObject` - Clean up old files
+- `s3:ListBucket` - List bucket contents
+- `s3:GetBucketLocation` - Required by AWS SDK
+
+---
+
+## S3 Configuration Checklist
+
+| Item | Example Value |
+|------|---------------|
+| AWS Account | Must match bucket owner |
+| IAM User | `loki-storage-user` |
+| Bucket Name | `osde2e-loki-logs` |
+| Region | `us-east-1` |
+| Secret Name | `loki-s3-credentials` |
+| Secret Keys | `access_key_id`, `access_key_secret` |
+
+---
+
+## 1. Test Result Files (Primary Storage)
+
+Test outputs including logs, JUnit XML, and reports are uploaded to S3.
+
+### S3 Bucket Structure
+
+```
+s3://osde2e-loki-logs/
+└── test-results/
+    └── 2025-12-03/
+        └── osde2e-xxx-20251203-123456/
+            ├── logs/
+            │   ├── osde2e-full.log
+            │   ├── consolidated.log
+            │   └── summary.log
+            ├── reports/
+            │   ├── test_output.log
+            │   └── install-log.txt
+            └── junit/
+                └── merged-results.xml
+```
+
+### Accessing Test Results
+
+**Method 1: Pre-signed URLs (Recommended)**
+
+```bash
+# View upload task logs after pipeline completes
+oc logs <pipelinerun-name>-upload-results-to-s3-pod -n osde2e-tekton
+
+# Output includes URLs like:
+# osde2e-full.log:
+# https://osde2e-loki-logs.s3.us-east-1.amazonaws.com/test-results/...?X-Amz-...
+```
+
+**Method 2: AWS CLI**
+
+```bash
+# List results
+aws s3 ls s3://osde2e-loki-logs/test-results/ --recursive | head -20
+
+# Download results
+aws s3 cp s3://osde2e-loki-logs/test-results/2025-12-03/osde2e-xxx/ ./results/ --recursive
+
+# Generate pre-signed URL manually
+aws s3 presign s3://osde2e-loki-logs/test-results/2025-12-03/xxx/logs/osde2e-full.log --expires-in 604800
+```
+
+---
+
+## 2. Real-time Pod Logs (Loki)
+
+Pod stdout/stderr logs are forwarded to Loki and stored in S3 in binary format.
+
+### Access via opc CLI
+
+```bash
+# View PipelineRun logs
+opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton
+
+# View specific TaskRun
+opc taskrun logs <taskrun-name> -n osde2e-tekton
+
+# Follow live logs
+opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton --follow
+```
+
+### Access via oc (While Pod Exists)
+
+```bash
+# List pods
+oc get pods -n osde2e-tekton -l tekton.dev/pipelineRun=<pipelinerun-name>
+
+# View logs
+oc logs <pod-name> -n osde2e-tekton --all-containers
+
+# Follow logs
+oc logs -f <pod-name> -n osde2e-tekton
+```
+
+**Note:** Pod logs are deleted when pods are removed. Use `opc` for historical access.
+
+---
+
+## 3. 
Run Metadata (Tekton Results)
+
+Tekton Results stores structured metadata in PostgreSQL:
+- PipelineRun/TaskRun definitions
+- Status, conditions, timestamps
+- Result values (PASS/FAIL, summary)
+
+### Query via Results API
+
+```bash
+# Port-forward to Results API
+oc port-forward svc/tekton-results-api-service 8080:8080 -n openshift-pipelines &
+
+# Get token
+TOKEN=$(oc whoami -t)
+
+# Query results
+curl -sk -H "Authorization: Bearer $TOKEN" \
+  "https://localhost:8080/apis/results.tekton.dev/v1alpha2/parents/osde2e-tekton/results" \
+  | jq '.results[-5:] | .[].name'
+```
+
+---
+
+## Installing opc CLI
+
+```bash
+# macOS
+brew tap openshift-pipelines/pipelines-as-code
+brew install opc
+
+# Linux
+curl -LO https://github.com/openshift-pipelines/opc/releases/latest/download/opc_linux_amd64.tar.gz
+tar xzf opc_linux_amd64.tar.gz
+sudo mv opc /usr/local/bin/
+
+# Verify
+opc version
+```
+
+---
+
+## Quick Reference
+
+| Task | Command |
+|------|---------|
+| Get S3 URLs | `oc logs <pipelinerun-name>-upload-results-to-s3-pod -n osde2e-tekton` |
+| Download from S3 | `aws s3 cp s3://osde2e-loki-logs/test-results/... ./` |
+| View historical logs | `opc pipelinerun logs <pipelinerun-name> -n osde2e-tekton` |
+| View live pod logs | `oc logs -f <pod-name> -n osde2e-tekton` |
+| Query Results API | `./tekton-results-api.sh query` |
+| Verify S3 access | `aws s3 ls s3://osde2e-loki-logs/` |
+| Check IAM policy | `aws iam get-user-policy --user-name loki-storage-user --policy-name LokiS3Access` |
+
+---
+
+## Troubleshooting
+
+### opc Shows "No Results"
+
+Tekton Results may not be enabled:
+```bash
+oc get tektonconfig config -o jsonpath='{.spec.result.disabled}'
+# Should be "false"
+```
+
+### Logs Not Appearing in Loki/S3
+
+Check ClusterLogForwarder configuration:
+```bash
+oc get clusterlogforwarder -n openshift-logging
+oc get pods -n openshift-logging | grep collector
+```
diff --git a/test/e2e/doc/TROUBLESHOOTING.md b/test/e2e/doc/TROUBLESHOOTING.md
new file mode 100644
index 0000000..c69f52c
--- /dev/null
+++ b/test/e2e/doc/TROUBLESHOOTING.md
@@ -0,0 +1,341 @@
+# Troubleshooting Guide
+
+Common issues and solutions for OSDE2E Tekton Pipeline setup.
+
+---
+
+## Quick Diagnosis
+
+```bash
+# Check all components
+oc get tektonconfig config                                  # Tekton Pipelines
+oc get pods -n openshift-pipelines | grep tekton-results    # Tekton Results
+oc get csv -n openshift-operators | grep loki               # Loki Operator
+oc get lokistack osde2e-loki -n osde2e-tekton               # LokiStack
+oc get pods -n osde2e-tekton | grep loki                    # Loki Pods
+```
+
+---
+
+## Issue 1: Loki Operator Installation Timeout
+
+### Symptoms
+```
+Timeout waiting for: Loki Operator installation
+constraints not satisfiable: no operators found in channel stable
+```
+
+### Root Cause
+The Loki Operator uses versioned channels (e.g., `stable-6.4`), not a generic `stable` channel.
+
+### Solution
+
+```bash
+# 1. Check available channels
+oc get packagemanifest loki-operator -n openshift-marketplace \
+  -o jsonpath='{.status.channels[*].name}' | tr ' ' '\n'
+# Output: stable-6.2 stable-6.3 stable-6.4
+
+# 2. Delete failed subscription
+oc delete subscription loki-operator -n openshift-operators
+
+# 3. Recreate with correct channel
+cat <<EOF | oc apply -f -
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: loki-operator
+  namespace: openshift-operators
+spec:
+  channel: stable-6.4
+  name: loki-operator
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+EOF
+
+# 4. Verify the operator installs
+oc get csv -n openshift-operators | grep loki
+```
+
+---
+
+## Issue 2: LokiStack Pods Stuck in Pending
+
+### Symptoms
+LokiStack pods (ingester, compactor) stay in `Pending` with insufficient CPU/memory scheduling events.
+
+### Root Cause
+The LokiStack size was chosen from total cluster capacity instead of what a single node can schedule.
+
+| Size | When to Use |
+|------|-------------|
+| `1x.demo` | Nodes < 4 CPU |
+| `1x.extra-small` | Nodes 4-6 CPU |
+| `1x.small` | Nodes >= 7 CPU |
+
+### Solution
+
+```bash
+# 1. Check node resources
+oc get nodes -l node-role.kubernetes.io/worker \
+  -o custom-columns='NAME:.metadata.name,CPU:.status.allocatable.cpu,MEM:.status.allocatable.memory'
+
+# 2. 
Change LokiStack size to 1x.demo
+oc patch lokistack osde2e-loki -n osde2e-tekton \
+  --type=merge -p '{"spec":{"size":"1x.demo"}}'
+
+# 3. Force recreate stuck pods
+oc delete pod osde2e-loki-ingester-0 osde2e-loki-compactor-0 \
+  -n osde2e-tekton --force --grace-period=0
+
+# 4. Verify pods start
+oc get pods -n osde2e-tekton | grep loki
+```
+
+### Prevention
+The `setup-complete-tekton-stack.sh` script checks single-node capacity when selecting LokiStack size.
+
+---
+
+## Issue 3: S3 Access Denied
+
+### Symptoms
+```
+AccessDenied: User: arn:aws:iam::XXXX:user/XXX is not authorized
+to perform: s3:PutObject on resource: "arn:aws:s3:::osde2e-loki-logs"
+```
+
+### Root Cause
+The IAM user lacks required S3 permissions.
+
+### Solution
+
+**Required IAM Policy:**
+```json
+{
+  "Version": "2012-10-17",
+  "Statement": [{
+    "Effect": "Allow",
+    "Action": [
+      "s3:PutObject",
+      "s3:GetObject",
+      "s3:DeleteObject",
+      "s3:ListBucket",
+      "s3:GetBucketLocation"
+    ],
+    "Resource": [
+      "arn:aws:s3:::osde2e-loki-logs",
+      "arn:aws:s3:::osde2e-loki-logs/*"
+    ]
+  }]
+}
+```
+
+**Apply via AWS CLI:**
+```bash
+# Save policy to file
+cat > /tmp/loki-s3-policy.json << 'EOF'
+{
+  "Version": "2012-10-17",
+  "Statement": [{
+    "Effect": "Allow",
+    "Action": ["s3:PutObject","s3:GetObject","s3:DeleteObject","s3:ListBucket","s3:GetBucketLocation"],
+    "Resource": ["arn:aws:s3:::osde2e-loki-logs","arn:aws:s3:::osde2e-loki-logs/*"]
+  }]
+}
+EOF
+
+# Apply to IAM user
+aws iam put-user-policy \
+  --user-name YOUR_IAM_USER \
+  --policy-name LokiS3Access \
+  --policy-document file:///tmp/loki-s3-policy.json
+```
+
+**Verify:**
+```bash
+aws s3 ls s3://osde2e-loki-logs/ --region us-east-1
+```
+
+---
+
+## Issue 4: ClusterLogForwarder CRD Not Found
+
+### Symptoms
+```
+error: resource mapping not found for kind "ClusterLogForwarder"
+ensure CRDs are installed first
+```
+
+### Root Cause
+The Cluster Logging Operator is not installed or CRDs are not ready.
+
+### Solution
+
+```bash
+# 1. Create namespace
+oc create namespace openshift-logging || true
+
+# 2. Create OperatorGroup
+cat <<EOF | oc apply -f -
+apiVersion: operators.coreos.com/v1
+kind: OperatorGroup
+metadata:
+  name: cluster-logging
+  namespace: openshift-logging
+spec:
+  targetNamespaces:
+  - openshift-logging
+EOF
+
+# 3. Subscribe to the Cluster Logging Operator
+#    (check channels: oc get packagemanifest cluster-logging -n openshift-marketplace)
+cat <<EOF | oc apply -f -
+apiVersion: operators.coreos.com/v1alpha1
+kind: Subscription
+metadata:
+  name: cluster-logging
+  namespace: openshift-logging
+spec:
+  channel: stable-6.2
+  name: cluster-logging
+  source: redhat-operators
+  sourceNamespace: openshift-marketplace
+EOF
+
+# 4. Wait for the CRD to register, then re-apply the forwarder
+oc wait --for condition=established \
+  crd/clusterlogforwarders.observability.openshift.io --timeout=300s
+oc apply -f ClusterLogForwarder.yaml
+```
+
+---
+
+## Issue 5: S3 URLs Return "Access Denied" in Browser
+
+### Symptoms
+```xml
+<Error><Code>AccessDenied</Code><Message>Access Denied</Message></Error>
+```
+
+### Root Cause
+S3 buckets are private by default. Direct URLs do not include authentication.
+
+### Solution
+Use pre-signed URLs from the upload task logs, or generate manually:
+
+```bash
+# Generate pre-signed URL (valid 7 days)
+aws s3 presign s3://osde2e-loki-logs/path/to/file --expires-in 604800
+
+# Or check PipelineRun upload task logs for pre-generated URLs
+oc logs <pipelinerun-name>-upload-results-to-s3-pod -n osde2e-tekton
+```
+
+---
+
+## Diagnostic Commands Reference
+
+```bash
+# Tekton Pipelines
+oc get tektonconfig config -o yaml
+oc get pipelinerun -n osde2e-tekton
+oc get taskrun -n osde2e-tekton
+
+# Tekton Results
+oc get pods -n openshift-pipelines | grep tekton-results
+oc logs -l app.kubernetes.io/name=tekton-results-api -n openshift-pipelines
+
+# Loki Operator
+oc get csv -n openshift-operators | grep loki
+oc get subscription loki-operator -n openshift-operators -o yaml
+
+# LokiStack
+oc get lokistack osde2e-loki -n osde2e-tekton -o yaml
+oc get pods -n osde2e-tekton -l app.kubernetes.io/name=lokistack
+oc logs -l app.kubernetes.io/component=ingester -n osde2e-tekton --tail=50
+
+# ClusterLogForwarder
+oc get clusterlogforwarder -n openshift-logging -o yaml
+oc get pods -n openshift-logging
+
+# S3 Access Test
+oc run test-s3 --rm -it --restart=Never \
+  --image=amazon/aws-cli:latest \
+  --env="AWS_ACCESS_KEY_ID=$(oc get secret loki-s3-credentials -n osde2e-tekton -o jsonpath='{.data.access_key_id}' | base64 -d)" \
+  --env="AWS_SECRET_ACCESS_KEY=$(oc get secret loki-s3-credentials -n osde2e-tekton -o jsonpath='{.data.access_key_secret}' | base64 -d)" \
+  --env="AWS_DEFAULT_REGION=us-east-1" \
+  -n osde2e-tekton -- aws s3 ls s3://osde2e-loki-logs/
+```
+
+---
+
+## Related Documentation
+
+- [MANUAL-SETUP-GUIDE.md](./MANUAL-SETUP-GUIDE.md) - Complete setup guide
+- [QUICK-REFERENCE.md](./QUICK-REFERENCE.md) - Command cheat sheet
+- [STORAGE-GUIDE.md](./STORAGE-GUIDE.md) - Storage configuration
+
diff --git a/test/e2e/loki-s3-policy.json b/test/e2e/loki-s3-policy.json
new file mode 100644
index 0000000..662c191
--- /dev/null
+++ b/test/e2e/loki-s3-policy.json
@@ -0,0 +1,22 @@
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:ListBucket",
+                "s3:GetBucketLocation"
+            ],
+            "Resource": "arn:aws:s3:::osde2e-loki-logs"
+        },
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:PutObject",
+                "s3:GetObject",
+                "s3:DeleteObject"
+            ],
+            "Resource": "arn:aws:s3:::osde2e-loki-logs/*"
+        }
+    ]
+}
diff --git a/test/e2e/osde2e-pipeline.yml b/test/e2e/osde2e-pipeline.yml
index f6d6112..d5ca818 100644
--- a/test/e2e/osde2e-pipeline.yml
+++ b/test/e2e/osde2e-pipeline.yml
@@ -14,7 +14,8 @@ spec:
   description: >-
     This pipeline orchestrates osde2e testing with comprehensive result collection
     for Tekton Results observability. It runs tests, collects logs and JUnit results,
-    and provides structured output for monitoring and analysis.
+    uploads results to S3 for long-term storage, and provides structured output
+    for monitoring and analysis.
params: - name: OSDE2E_CONFIGS @@ -63,6 +64,10 @@ spec: type: string description: PagerDuty routing key for alerts default: "" + - name: S3_RESULTS_BUCKET + type: string + description: S3 bucket for storing test results (long-term) + default: "osde2e-loki-logs" workspaces: - name: test-workspace @@ -110,59 +115,21 @@ spec: workspace: test-workspace finally: - - name: cleanup-and-report + # Upload results to S3 for long-term storage and URL access + - name: upload-results-to-s3 + taskRef: + name: upload-to-s3-task params: + - name: S3_BUCKET + value: $(params.S3_RESULTS_BUCKET) + - name: PIPELINE_RUN_NAME + value: $(context.pipelineRun.name) + - name: AWS_REGION + value: $(params.CLOUD_PROVIDER_REGION) - name: TEST_STATUS value: $(tasks.osde2e-test.results.test-status) - name: OSDE2E_CONFIGS value: $(params.OSDE2E_CONFIGS) - taskSpec: - params: - - name: TEST_STATUS - - name: OSDE2E_CONFIGS - workspaces: - - name: test-results - mountPath: /workspace/test-results - steps: - - name: final-report - image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest - script: | - #!/bin/bash - set -euo pipefail - - echo "=== Final Pipeline Report ===" - echo "Pipeline execution completed" - echo "Test Status: $(params.TEST_STATUS)" - echo "Configuration: $(params.OSDE2E_CONFIGS)" - echo "Timestamp: $(date -Iseconds)" - - # Log final status to workspace - { - echo "=== Pipeline Final Report ===" - echo "Execution completed at: $(date -Iseconds)" - echo "Final Status: $(params.TEST_STATUS)" - echo "Configuration: $(params.OSDE2E_CONFIGS)" - echo "" - echo "Available result files:" - find /workspace/test-results -type f -name "*.log" -o -name "*.xml" | sort - } >> /workspace/test-results/logs/pipeline-final-report.log - - if [ "$(params.TEST_STATUS)" = "PASS" ]; then - echo "✅ Pipeline completed successfully" - exit 0 - else - echo "❌ Pipeline completed with test failures" - # Don't fail the finally task, just report - exit 0 - fi - securityContext: - runAsNonRoot: true - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: RuntimeDefault workspaces: - name: test-results workspace: test-workspace - diff --git a/test/e2e/osde2e-tekton-task.yml b/test/e2e/osde2e-tekton-task.yml index 1f6083b..3a30d0d 100644 --- a/test/e2e/osde2e-tekton-task.yml +++ b/test/e2e/osde2e-tekton-task.yml @@ -81,6 +81,9 @@ spec: steps: - name: setup-test-environment image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest + env: + - name: HOME + value: /tekton/home script: | #!/bin/bash set -euo pipefail @@ -160,10 +163,15 @@ spec: # Copy summary to result cp $(workspaces.test-results.path)/logs/summary.log $(results.test-summary.path) - # Exit with the original test exit code to maintain pipeline behavior - exit $TEST_EXIT_CODE + # Always exit 0 here to allow collect-test-results step to run + # The actual test status is stored in test-status result + echo "Test execution completed with exit code: $TEST_EXIT_CODE" + exit 0 env: + # Set HOME to writable directory to avoid .docker permission warnings + - name: HOME + value: /tekton/home # Test configuration - name: AD_HOC_TEST_IMAGES value: "$(params.TEST_IMAGE):$(params.IMAGE_TAG)" @@ -227,6 +235,9 @@ spec: - name: collect-test-results image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest + env: + - name: HOME + value: /tekton/home script: | #!/bin/bash set -euo pipefail @@ -234,6 +245,8 @@ spec: echo "Collecting and processing test results..." 
# Process JUnit XML results from osde2e REPORT_DIR + # NOTE: We store summary only in Results (to avoid 4KB limit) + # Full JUnit XML is saved to workspace for later retrieval JUNIT_FOUND=false # Check for JUnit XML in reports directory (osde2e default location) @@ -242,12 +255,18 @@ spec: find $(workspaces.test-results.path)/reports -name "*.xml" -type f > /tmp/junit_files.txt 2>/dev/null || true if [ -s /tmp/junit_files.txt ]; then - echo "Found JUnit XML files, merging results..." - cat $(cat /tmp/junit_files.txt) > $(results.test-results.path) 2>/dev/null && JUNIT_FOUND=true + echo "Found JUnit XML files" + JUNIT_COUNT=$(cat /tmp/junit_files.txt | wc -l) + + # Merge full results to workspace (not to Result) + cat $(cat /tmp/junit_files.txt) > $(workspaces.test-results.path)/junit/merged-results.xml 2>/dev/null && JUNIT_FOUND=true + + # Store only summary in Result (to avoid size limit) + echo "JUnit: Found $JUNIT_COUNT XML file(s)" > $(results.test-results.path) # Log JUnit summary echo "=== JUnit Results Summary ===" >> $(workspaces.test-results.path)/logs/summary.log - echo "JUnit XML files found:" >> $(workspaces.test-results.path)/logs/summary.log + echo "JUnit XML files found: $JUNIT_COUNT" >> $(workspaces.test-results.path)/logs/summary.log cat /tmp/junit_files.txt >> $(workspaces.test-results.path)/logs/summary.log fi fi @@ -255,18 +274,19 @@ spec: # Also check for junit.xml in the main reports directory (common osde2e pattern) if [ -f "$(workspaces.test-results.path)/reports/junit.xml" ]; then echo "Found junit.xml in reports directory" - cp $(workspaces.test-results.path)/reports/junit.xml $(results.test-results.path) + cp $(workspaces.test-results.path)/reports/junit.xml $(workspaces.test-results.path)/junit/junit.xml + echo "JUnit: reports/junit.xml" > $(results.test-results.path) JUNIT_FOUND=true fi - # If no JUnit results found, create empty result + # If no JUnit results found, create minimal result if [ "$JUNIT_FOUND" = "false" ]; then - echo "No JUnit XML results found, creating empty result" - echo '' > $(results.test-results.path) + echo "No JUnit XML results found" + echo "JUnit: No results" > $(results.test-results.path) echo "No JUnit XML files found" >> $(workspaces.test-results.path)/logs/summary.log fi - # Consolidate all logs + # Consolidate all logs to workspace (not to Result to avoid size limit) echo "Consolidating test logs..." 
{ echo "=== OSDE2E Test Execution Logs ===" @@ -309,7 +329,11 @@ spec: cat $(workspaces.test-results.path)/reports/test_output.log fi fi - } > $(results.test-logs.path) + } > $(workspaces.test-results.path)/logs/consolidated.log + + # Store only summary in Result (to avoid 4KB limit) + echo "Logs consolidated at: $(date -Iseconds)" > $(results.test-logs.path) + echo "Full logs available in workspace: logs/consolidated.log" >> $(results.test-logs.path) echo "Test result collection completed successfully" diff --git a/test/e2e/run-with-credentials.sh b/test/e2e/run-with-credentials.sh new file mode 100755 index 0000000..ae3cd32 --- /dev/null +++ b/test/e2e/run-with-credentials.sh @@ -0,0 +1,369 @@ +#!/bin/bash + +# OSDE2E Tekton Test Runner Script +# Automatically sets up credentials and runs tests + +set -euo pipefail + +# --- Color Definitions --- +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +# --- Configuration --- +NAMESPACE="osde2e-tekton" +SECRET_NAME="osde2e-credentials" +TEMPLATE_FILE="e2e-tekton-template.yml" + +echo -e "${BLUE}╔══════════════════════════════════════════════════════════════╗${NC}" +echo -e "${BLUE}║ OSDE2E Tekton Test Runner ║${NC}" +echo -e "${BLUE}╚══════════════════════════════════════════════════════════════╝${NC}" +echo "" + +# --- Function Definitions --- + +# Check if command exists +check_command() { + if ! command -v "$1" &> /dev/null; then + echo -e "${RED}Error: Command '$1' not found${NC}" + echo -e "${YELLOW}Please install $1${NC}" + exit 1 + fi +} + +# Get OCM credentials +get_ocm_credentials() { + local ocm_config="$HOME/.config/ocm/ocm.json" + + if [ -f "$ocm_config" ]; then + echo -e "${GREEN}Found OCM config: $ocm_config${NC}" + + if command -v jq &> /dev/null; then + OCM_CLIENT_ID=$(jq -r '.client_id // "cloud-services"' "$ocm_config") + OCM_CLIENT_SECRET=$(jq -r '.refresh_token // .client_secret // empty' "$ocm_config") + + if [ -n "$OCM_CLIENT_SECRET" ]; then + echo -e "${GREEN}OCM credentials loaded${NC}" + return 0 + fi + fi + fi + + echo -e "${YELLOW}Warning: OCM credentials not found${NC}" + return 1 +} + +# Get AWS credentials +get_aws_credentials() { + # Check environment variables first + if [ -n "${AWS_ACCESS_KEY_ID:-}" ] && [ -n "${AWS_SECRET_ACCESS_KEY:-}" ]; then + echo -e "${GREEN}AWS credentials loaded from environment variables${NC}" + return 0 + fi + + # Check AWS credentials file + local aws_creds="$HOME/.aws/credentials" + local aws_config="$HOME/.aws/config" + + if [ -f "$aws_creds" ]; then + echo -e "${GREEN}Found AWS credentials file: $aws_creds${NC}" + + # Try to read default profile + AWS_ACCESS_KEY_ID=$(grep -A 2 '\[default\]' "$aws_creds" | grep aws_access_key_id | cut -d'=' -f2 | tr -d ' ') + AWS_SECRET_ACCESS_KEY=$(grep -A 2 '\[default\]' "$aws_creds" | grep aws_secret_access_key | cut -d'=' -f2 | tr -d ' ') + + if [ -n "$AWS_ACCESS_KEY_ID" ] && [ -n "$AWS_SECRET_ACCESS_KEY" ]; then + echo -e "${GREEN}AWS credentials loaded from default profile${NC}" + return 0 + fi + fi + + echo -e "${YELLOW}Warning: AWS credentials not found${NC}" + return 1 +} + +# Prompt user for OCM credentials +prompt_ocm_credentials() { + echo "" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${CYAN}OCM Credentials Required${NC}" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo "" + echo -e "${YELLOW}How to get OCM credentials:${NC}" + echo " 1. 
Visit: https://console.redhat.com/openshift/" + echo " 2. Click user menu (top right) -> API Tokens" + echo " 3. Click 'Load Token'" + echo "" + echo -e "${YELLOW}Or use ROSA CLI:${NC}" + echo " rosa login" + echo " cat ~/.config/ocm/ocm.json" + echo "" + + read -p "Enter OCM_CLIENT_ID [default: cloud-services]: " input_client_id + OCM_CLIENT_ID="${input_client_id:-cloud-services}" + + read -sp "Enter OCM_CLIENT_SECRET (Offline Token): " input_client_secret + echo "" + OCM_CLIENT_SECRET="$input_client_secret" + + if [ -z "$OCM_CLIENT_SECRET" ]; then + echo -e "${RED}Error: OCM_CLIENT_SECRET cannot be empty${NC}" + exit 1 + fi + + echo -e "${GREEN}OCM credentials entered${NC}" +} + +# Prompt user for AWS credentials +prompt_aws_credentials() { + echo "" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${CYAN}AWS Credentials Required (for ROSA Provider)${NC}" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo "" + echo -e "${YELLOW}How to get AWS credentials:${NC}" + echo " 1. AWS Console -> IAM -> Security Credentials" + echo " 2. Or read from ~/.aws/credentials file" + echo " 3. Or set environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY" + echo "" + + read -p "Enter AWS_ACCESS_KEY_ID: " input_aws_key + AWS_ACCESS_KEY_ID="$input_aws_key" + + read -sp "Enter AWS_SECRET_ACCESS_KEY: " input_aws_secret + echo "" + AWS_SECRET_ACCESS_KEY="$input_aws_secret" + + if [ -z "$AWS_ACCESS_KEY_ID" ] || [ -z "$AWS_SECRET_ACCESS_KEY" ]; then + echo -e "${RED}Error: AWS credentials cannot be empty${NC}" + exit 1 + fi + + echo -e "${GREEN}AWS credentials entered${NC}" +} + +# Create or update Secret +create_or_update_secret() { + echo "" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${CYAN}Creating/Updating Secret (OCM + AWS credentials)${NC}" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo "" + + if oc get secret "$SECRET_NAME" -n "$NAMESPACE" &>/dev/null; then + echo -e "${YELLOW}Secret '$SECRET_NAME' already exists${NC}" + + # Check if existing Secret contains AWS credentials + EXISTING_AWS_KEY=$(oc get secret "$SECRET_NAME" -n "$NAMESPACE" -o jsonpath='{.data.AWS_ACCESS_KEY_ID}' 2>/dev/null || echo "") + if [ -n "$EXISTING_AWS_KEY" ]; then + echo -e "${GREEN} ✓ Contains OCM credentials${NC}" + echo -e "${GREEN} ✓ Contains AWS credentials${NC}" + else + echo -e "${GREEN} ✓ Contains OCM credentials${NC}" + echo -e "${RED} ✗ Missing AWS credentials (update required)${NC}" + fi + + read -p "Update secret? [y/N] " -n 1 -r + echo "" + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + echo -e "${BLUE}Skipping Secret update${NC}" + return + fi + + echo "Deleting existing Secret..." + oc delete secret "$SECRET_NAME" -n "$NAMESPACE" + fi + + echo "Creating Secret (OCM + AWS credentials)..." 
+ oc create secret generic "$SECRET_NAME" \ + --from-literal=OCM_CLIENT_ID="$OCM_CLIENT_ID" \ + --from-literal=OCM_CLIENT_SECRET="$OCM_CLIENT_SECRET" \ + --from-literal=AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID" \ + --from-literal=AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY" \ + -n "$NAMESPACE" + + echo -e "${GREEN}Secret created successfully${NC}" + echo "" + echo "Secret contains:" + echo " ✓ OCM_CLIENT_ID" + echo " ✓ OCM_CLIENT_SECRET" + echo " ✓ AWS_ACCESS_KEY_ID" + echo " ✓ AWS_SECRET_ACCESS_KEY" +} + +# Run PipelineRun +run_pipeline() { + local cluster_id="${1:-}" + local test_image="${2:-quay.io/redhat-services-prod/oeo-cicada-tenant/osd-example-operator-e2e}" + local image_tag="${3:-latest}" + local configs="${4:-rosa,sts,int,ad-hoc-image}" + + echo "" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${CYAN}Running OSDE2E Test${NC}" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo "" + + if [ -z "$cluster_id" ]; then + echo -e "${YELLOW}Please enter CLUSTER_ID:${NC}" + echo "" + echo "How to get CLUSTER_ID:" + echo " rosa list clusters" + echo " oc get clusterversion -o jsonpath='{.items[0].spec.clusterID}'" + echo "" + read -p "CLUSTER_ID: " cluster_id + + if [ -z "$cluster_id" ]; then + echo -e "${RED}Error: CLUSTER_ID cannot be empty${NC}" + exit 1 + fi + fi + + echo "Test configuration:" + echo " CLUSTER_ID: $cluster_id" + echo " TEST_IMAGE: $test_image" + echo " IMAGE_TAG: $image_tag" + echo " OSDE2E_CONFIGS: $configs" + echo "" + + read -p "Confirm and run? [Y/n] " -n 1 -r + echo "" + if [[ $REPLY =~ ^[Nn]$ ]]; then + echo -e "${YELLOW}Cancelled${NC}" + exit 0 + fi + + echo "Submitting PipelineRun..." + oc process -f "$TEMPLATE_FILE" \ + -p OSDE2E_CONFIGS="$configs" \ + -p TEST_IMAGE="$test_image" \ + -p IMAGE_TAG="$image_tag" \ + -p CLUSTER_ID="$cluster_id" \ + | oc apply -f - + + echo "" + echo -e "${GREEN}PipelineRun submitted${NC}" +} + +# Show PipelineRun status +show_status() { + echo "" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${CYAN}PipelineRun Status${NC}" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo "" + + echo "Getting latest PipelineRun..." + sleep 2 + + local pipelinerun=$(oc get pipelinerun -n "$NAMESPACE" \ + --sort-by=.metadata.creationTimestamp \ + -o jsonpath='{.items[-1].metadata.name}' 2>/dev/null || echo "") + + if [ -z "$pipelinerun" ]; then + echo -e "${YELLOW}No PipelineRun found${NC}" + return + fi + + echo -e "${GREEN}Latest PipelineRun: ${BLUE}$pipelinerun${NC}" + echo "" + + echo -e "${YELLOW}View logs:${NC}" + echo " opc pipelinerun logs $pipelinerun -n $NAMESPACE" + echo "" + + echo -e "${YELLOW}View status:${NC}" + echo " oc get pipelinerun $pipelinerun -n $NAMESPACE -w" + echo "" + + echo -e "${YELLOW}Get S3 test result URLs (after completion):${NC}" + echo " oc logs ${pipelinerun}-upload-results-to-s3-pod -n $NAMESPACE" + echo "" + + if command -v opc &> /dev/null; then + read -p "View logs now? [y/N] " -n 1 -r + echo "" + if [[ $REPLY =~ ^[Yy]$ ]]; then + opc pipelinerun logs "$pipelinerun" -n "$NAMESPACE" + fi + else + echo -e "${YELLOW}Tip: Install 'opc' CLI to view Tekton Results${NC}" + echo " See: doc/QUERY-RESULTS-GUIDE.md" + fi +} + +# --- Main Program --- + +echo -e "${YELLOW}Checking dependencies...${NC}" +check_command "oc" +check_command "jq" + +echo -e "${GREEN}Dependencies verified${NC}" +echo "" + +# Check if logged in to OpenShift +if ! 
oc whoami &>/dev/null; then + echo -e "${RED}Error: Not logged in to OpenShift${NC}" + echo -e "${YELLOW}Please run: oc login${NC}" + exit 1 +fi + +echo -e "${GREEN}Logged in to OpenShift: $(oc whoami --show-server)${NC}" +echo "" + +# Check if namespace exists +if ! oc get namespace "$NAMESPACE" &>/dev/null; then + echo -e "${RED}Error: Namespace '$NAMESPACE' does not exist${NC}" + echo -e "${YELLOW}Please run: ./setup-complete-tekton-stack.sh${NC}" + exit 1 +fi + +echo -e "${GREEN}Namespace '$NAMESPACE' exists${NC}" +echo "" + +# Get OCM credentials +echo -e "${YELLOW}--- 1. Getting OCM Credentials ---${NC}" +if ! get_ocm_credentials; then + prompt_ocm_credentials +fi + +# Get AWS credentials (required for ROSA Provider) +echo "" +echo -e "${YELLOW}--- 2. Getting AWS Credentials (required for ROSA Provider) ---${NC}" +if ! get_aws_credentials; then + prompt_aws_credentials +fi + +# Create or update Secret +create_or_update_secret + +# Parse command line arguments +CLUSTER_ID="${1:-}" +TEST_IMAGE="${2:-quay.io/redhat-services-prod/oeo-cicada-tenant/osd-example-operator-e2e}" +IMAGE_TAG="${3:-latest}" +OSDE2E_CONFIGS="${4:-rosa,sts,int,ad-hoc-image}" + +# Run Pipeline +run_pipeline "$CLUSTER_ID" "$TEST_IMAGE" "$IMAGE_TAG" "$OSDE2E_CONFIGS" + +# Show status +show_status + +echo "" +echo -e "${BLUE}╔══════════════════════════════════════════════════════════════╗${NC}" +echo -e "${BLUE}║ Done! ║${NC}" +echo -e "${BLUE}╚══════════════════════════════════════════════════════════════╝${NC}" +echo "" +echo -e "${CYAN}Test result storage locations:${NC}" +echo " • Loki S3: Real-time logs (stdout/stderr) - query via Loki API" +echo " • S3 Bucket: Test files (logs, reports, JUnit XML) - with pre-signed URLs" +echo "" +echo -e "${CYAN}Get S3 URLs after pipeline completes:${NC}" +latest_pr=$(oc get pipelinerun -n "$NAMESPACE" \ + --sort-by=.metadata.creationTimestamp \ + -o jsonpath='{.items[-1].metadata.name}' 2>/dev/null || echo "") +echo " oc logs ${latest_pr}-upload-results-to-s3-pod -n $NAMESPACE" +echo "" diff --git a/test/e2e/setup-complete-tekton-stack.sh b/test/e2e/setup-complete-tekton-stack.sh new file mode 100755 index 0000000..4c187d6 --- /dev/null +++ b/test/e2e/setup-complete-tekton-stack.sh @@ -0,0 +1,849 @@ +#!/bin/bash + +# 🚀 Complete Tekton Stack Setup Script +# Sets up OpenShift Pipelines, Tekton Results, and Loki from scratch + +set -euo pipefail + +# --- Color Definitions --- +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +MAGENTA='\033[0;35m' +NC='\033[0m' # No Color + +# --- Configuration --- +NAMESPACE="${NAMESPACE:-osde2e-tekton}" +SKIP_CONFIRMATION="${SKIP_CONFIRMATION:-false}" + +# --- Helper Functions --- + +print_header() { + echo "" + echo -e "${BLUE}╔══════════════════════════════════════════════════════════════╗${NC}" + echo -e "${BLUE}║ $1${NC}" + echo -e "${BLUE}╚══════════════════════════════════════════════════════════════╝${NC}" + echo "" +} + +print_step() { + echo "" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${CYAN}$1${NC}" + echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo "" +} + +print_success() { + echo -e "${GREEN}✅ $1${NC}" +} + +print_error() { + echo -e "${RED}❌ $1${NC}" +} + +print_warning() { + echo -e "${YELLOW}⚠️ $1${NC}" +} + +print_info() { + echo -e "${CYAN}ℹ️ $1${NC}" +} + +wait_for_condition() { + local description="$1" + local condition="$2" + local timeout="${3:-300}" + local interval="${4:-10}" + + 
echo -e "${YELLOW}Waiting for: $description${NC}" + + local elapsed=0 + while [ $elapsed -lt $timeout ]; do + if eval "$condition" &>/dev/null; then + print_success "$description" + return 0 + fi + + echo " Still waiting... ($elapsed/$timeout seconds)" + sleep $interval + elapsed=$((elapsed + interval)) + done + + print_error "Timeout waiting for: $description" + return 1 +} + +# --- Main Script --- + +print_header "Complete Tekton Stack Setup" + +echo -e "${MAGENTA}This script will install:${NC}" +echo " 1. OpenShift Pipelines Operator (Tekton)" +echo " 2. Tekton Results (PostgreSQL)" +echo " 3. Loki Operator" +echo " 4. LokiStack (S3 storage)" +echo " 5. ClusterLogForwarder" +echo " 6. Tekton resources in namespace: $NAMESPACE" +echo "" + +if [ "$SKIP_CONFIRMATION" != "true" ]; then + read -p "Continue? [y/N] " -n 1 -r + echo "" + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + echo "Aborted." + exit 0 + fi +fi + +# ========================================== +# Step 0: Prerequisites Check +# ========================================== + +print_step "Step 0: Checking Prerequisites" + +# Check oc CLI +if ! command -v oc &>/dev/null; then + print_error "oc CLI not found. Please install it first." + exit 1 +fi +print_success "oc CLI found" + +# Check cluster connection +if ! oc whoami &>/dev/null; then + print_error "Not logged in to OpenShift cluster" + echo "" + echo "Please run: oc login " + exit 1 +fi + +CLUSTER_URL=$(oc whoami --show-server) +CLUSTER_USER=$(oc whoami) +print_success "Connected to cluster: $CLUSTER_URL" +print_success "Logged in as: $CLUSTER_USER" + +# Check admin permissions +if ! oc auth can-i '*' '*' --all-namespaces &>/dev/null; then + print_warning "You may not have cluster admin permissions" + print_warning "Some operations might fail" + if [ "$SKIP_CONFIRMATION" != "true" ]; then + read -p "Continue anyway? [y/N] " -n 1 -r + echo "" + if [[ ! $REPLY =~ ^[Yy]$ ]]; then + exit 0 + fi + fi +fi + +# ========================================== +# Step 1: Install OpenShift Pipelines +# ========================================== + +print_step "Step 1: Installing OpenShift Pipelines Operator" + +if oc get tektonconfig config &>/dev/null; then + print_success "OpenShift Pipelines already installed" +else + print_info "Installing OpenShift Pipelines Operator..." + + # Apply subscription + cat </dev/null)\" = \"True\" ]" \ + 300 10 + + print_success "OpenShift Pipelines Operator installed" +fi + +# Verify +echo "" +print_info "Verifying Tekton components..." +oc get tektonconfig config +echo "" +oc get pods -n openshift-pipelines | head -10 + +# ========================================== +# Step 2: Enable Tekton Results +# ========================================== + +print_step "Step 2: Enabling Tekton Results" + +# Check if Results is already enabled +RESULTS_DISABLED=$(oc get tektonconfig config -o jsonpath='{.spec.result.disabled}' 2>/dev/null || echo "true") + +if [ "$RESULTS_DISABLED" = "false" ]; then + print_success "Tekton Results already enabled" +else + print_info "Enabling Tekton Results..." 
+ + oc patch tektonconfig config --type=merge -p '{ + "spec": { + "result": { + "disabled": false + } + } + }' + + # Wait for TektonResult custom resource + wait_for_condition \ + "TektonResult resource ready" \ + "[ \"\$(oc get tektonresult result -o jsonpath='{.status.conditions[?(@.type==\"Ready\")].status}' 2>/dev/null)\" = \"True\" ]" \ + 60 5 + + # Wait for Results pods (deployed in openshift-pipelines namespace) + wait_for_condition \ + "Tekton Results pods ready" \ + "[ \$(oc get pods -n openshift-pipelines --no-headers 2>/dev/null | grep -c 'tekton-results.*Running') -ge 3 ]" \ + 300 10 + + print_success "Tekton Results enabled" +fi + +# Verify Results API +echo "" +print_info "Verifying Tekton Results API..." +if oc get service -n openshift-pipelines tekton-results-api-service &>/dev/null; then + RESULTS_API_SVC=$(oc get service -n openshift-pipelines tekton-results-api-service -o jsonpath='{.spec.clusterIP}:{.spec.ports[0].port}') + print_success "Tekton Results API Service: $RESULTS_API_SVC" + + # Check if route exists, if not suggest creating one + if oc get route -n openshift-pipelines tekton-results-api &>/dev/null; then + RESULTS_API_ROUTE=$(oc get route -n openshift-pipelines tekton-results-api -o jsonpath='{.spec.host}') + print_success "Tekton Results API Route: https://$RESULTS_API_ROUTE" + else + print_info "No external route configured (API accessible internally)" + fi +else + print_warning "Tekton Results API service not found (may take a few more minutes)" +fi + +oc get pods -n openshift-pipelines | grep tekton-results + +# ========================================== +# Step 3: Install Loki Operator +# ========================================== + +print_step "Step 3: Installing Loki Operator" + +# Check if already installed (check both possible namespaces) +LOKI_INSTALLED=false +if oc get csv -n openshift-operators 2>/dev/null | grep -q loki-operator; then + print_success "Loki Operator already installed in openshift-operators" + LOKI_INSTALLED=true +elif oc get csv -n openshift-operators-redhat 2>/dev/null | grep -q loki-operator; then + print_success "Loki Operator already installed in openshift-operators-redhat" + LOKI_INSTALLED=true +fi + +if [ "$LOKI_INSTALLED" = "false" ]; then + print_info "Installing Loki Operator..." + + # Determine the best available channel + print_info "Checking available Loki Operator channels..." 
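+    # The catalog's full channel list can be inspected directly with:
+    #   oc get packagemanifest loki-operator -n openshift-marketplace \
+    #     -o jsonpath='{.status.channels[*].name}'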
+ + # Try to get available channels (prefer stable-6.4, then stable-6.3) + LOKI_CHANNEL="" + if oc get packagemanifest loki-operator -n openshift-marketplace -o jsonpath='{.status.channels[*].name}' 2>/dev/null | grep -q "stable-6.4"; then + LOKI_CHANNEL="stable-6.4" + print_info "Using channel: stable-6.4" + elif oc get packagemanifest loki-operator -n openshift-marketplace -o jsonpath='{.status.channels[*].name}' 2>/dev/null | grep -q "stable-6.3"; then + LOKI_CHANNEL="stable-6.3" + print_info "Using channel: stable-6.3" + elif oc get packagemanifest loki-operator -n openshift-marketplace -o jsonpath='{.status.channels[*].name}' 2>/dev/null | grep -q "stable-6.2"; then + LOKI_CHANNEL="stable-6.2" + print_warning "Using channel: stable-6.2 (older version)" + else + print_error "No stable channel found for loki-operator" + print_info "Available channels:" + oc get packagemanifest loki-operator -n openshift-marketplace -o jsonpath='{.status.channels[*].name}' 2>/dev/null || echo " Unable to query" + exit 1 + fi + + # Install in openshift-operators (standard global namespace) + cat </dev/null || echo "") + if [ -n "$EXPECTED_CSV" ]; then + print_info "Installing CSV: $EXPECTED_CSV" + break + fi + sleep 2 + done + + if [ -z "$EXPECTED_CSV" ]; then + print_error "Subscription did not resolve to a CSV" + oc get subscription loki-operator -n openshift-operators -o yaml + exit 1 + fi + + # Wait for CSV to reach Succeeded phase + wait_for_condition \ + "Loki Operator CSV: $EXPECTED_CSV" \ + "[ \"\$(oc get csv '$EXPECTED_CSV' -n openshift-operators -o jsonpath='{.status.phase}' 2>/dev/null)\" = \"Succeeded\" ]" \ + 300 10 + + print_success "Loki Operator installed" +fi + +# Verify +echo "" +print_info "Verifying Loki Operator..." +# CSV may be in either openshift-operators-redhat or openshift-operators +if oc get csv -n openshift-operators-redhat 2>/dev/null | grep -q loki-operator; then + oc get csv -n openshift-operators-redhat | grep loki-operator +elif oc get csv -n openshift-operators 2>/dev/null | grep -q loki-operator; then + oc get csv -n openshift-operators | grep loki-operator + print_info "Note: Loki Operator installed in openshift-operators (alternate location)" +fi + +# Operator pod runs in openshift-operators (managed by OLM) +if oc get pods -n openshift-operators 2>/dev/null | grep -q loki-operator; then + oc get pods -n openshift-operators | grep loki-operator +else + print_warning "Loki Operator pod not found (may still be starting)" +fi + +# ========================================== +# Step 4: Create Namespace for Testing +# ========================================== + +print_step "Step 4: Creating Namespace: $NAMESPACE" + +if oc get namespace "$NAMESPACE" &>/dev/null; then + print_success "Namespace $NAMESPACE already exists" +else + oc new-project "$NAMESPACE" + print_success "Namespace $NAMESPACE created" +fi + +# ========================================== +# Step 5: Configure AWS S3 for Loki +# ========================================== + +print_step "Step 5: Configuring AWS S3 for Loki" + +# Check if secret already exists +if oc get secret loki-s3-credentials -n "$NAMESPACE" &>/dev/null; then + print_success "S3 credentials secret already exists" + + read -p "Do you want to update it? [y/N] " -n 1 -r + echo "" + if [[ ! 
$REPLY =~ ^[Yy]$ ]]; then + print_info "Skipping S3 configuration" + S3_CONFIGURED=true + else + S3_CONFIGURED=false + fi +else + S3_CONFIGURED=false +fi + +if [ "$S3_CONFIGURED" != "true" ]; then + echo "" + echo -e "${YELLOW}AWS S3 Configuration${NC}" + echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo "" + echo "You need:" + echo " 1. AWS Access Key ID" + echo " 2. AWS Secret Access Key" + echo " 3. S3 Bucket Name (will be created if it doesn't exist)" + echo " 4. AWS Region (default: us-east-1)" + echo "" + + read -p "AWS Access Key ID: " AWS_ACCESS_KEY_ID + read -sp "AWS Secret Access Key: " AWS_SECRET_ACCESS_KEY + echo "" + read -p "S3 Bucket Name: " S3_BUCKET_NAME + read -p "AWS Region [us-east-1]: " AWS_REGION + AWS_REGION="${AWS_REGION:-us-east-1}" + + # Validate inputs + if [ -z "$AWS_ACCESS_KEY_ID" ] || [ -z "$AWS_SECRET_ACCESS_KEY" ] || [ -z "$S3_BUCKET_NAME" ]; then + print_error "AWS credentials cannot be empty" + exit 1 + fi + + # Create S3 bucket if it doesn't exist + print_info "Checking if S3 bucket exists..." + if aws s3 ls "s3://$S3_BUCKET_NAME" --region "$AWS_REGION" &>/dev/null; then + print_success "S3 bucket $S3_BUCKET_NAME exists" + else + print_info "Creating S3 bucket: $S3_BUCKET_NAME" + if aws s3 mb "s3://$S3_BUCKET_NAME" --region "$AWS_REGION"; then + print_success "S3 bucket created" + + # Enable versioning + aws s3api put-bucket-versioning \ + --bucket "$S3_BUCKET_NAME" \ + --versioning-configuration Status=Enabled \ + --region "$AWS_REGION" || true + else + print_error "Failed to create S3 bucket" + exit 1 + fi + fi + + # Create secret + print_info "Creating S3 credentials secret..." + oc create secret generic loki-s3-credentials \ + --from-literal=access_key_id="$AWS_ACCESS_KEY_ID" \ + --from-literal=access_key_secret="$AWS_SECRET_ACCESS_KEY" \ + --from-literal=bucketnames="$S3_BUCKET_NAME" \ + --from-literal=endpoint="https://s3.${AWS_REGION}.amazonaws.com" \ + --from-literal=region="$AWS_REGION" \ + -n "$NAMESPACE" \ + --dry-run=client -o yaml | oc apply -f - + + print_success "S3 credentials secret created" +fi + +# ========================================== +# Step 6: Deploy LokiStack +# ========================================== + +print_step "Step 6: Deploying LokiStack" + +if oc get lokistack osde2e-loki -n "$NAMESPACE" &>/dev/null; then + print_success "LokiStack already exists" + + # Check current size + CURRENT_SIZE=$(oc get lokistack osde2e-loki -n "$NAMESPACE" -o jsonpath='{.spec.size}') + print_info "Current LokiStack size: $CURRENT_SIZE" +else + print_info "Creating LokiStack..." + + # Check available cluster resources to suggest appropriate size + # IMPORTANT: Check MAX resources of a SINGLE node, not total across all nodes + print_info "Checking cluster resources..." 
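+    # Unit note: node allocatable CPU is typically reported in millicores
+    # (e.g. "3500m") and memory in Ki, hence the rtrimstr plus /1000 and
+    # /1024/1024 conversions in the jq expressions below.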
+ + # Get the maximum allocatable CPU and Memory from any single worker node + MAX_NODE_CPU=$(oc get nodes -l node-role.kubernetes.io/worker -o json | \ + jq -r '[.items[].status.allocatable.cpu | rtrimstr("m") | tonumber] | max / 1000' 2>/dev/null || echo "0") + MAX_NODE_MEM=$(oc get nodes -l node-role.kubernetes.io/worker -o json | \ + jq -r '[.items[].status.allocatable.memory | rtrimstr("Ki") | tonumber] | max / 1024 / 1024' 2>/dev/null || echo "0") + + print_info "Max single worker node: ${MAX_NODE_CPU} cores, ${MAX_NODE_MEM}Gi allocatable" + + # Suggest size based on SINGLE NODE resources (since pods run on one node) + # 1x.demo: ~2 CPU, ~8Gi RAM per pod (single replica) - CRITICAL: Ingester needs 2 CPU + 8Gi + # 1x.extra-small: ~4 CPU, ~16Gi RAM per pod (2 replicas) + # 1x.small: ~6 CPU, ~24Gi RAM per pod (2 replicas) + LOKI_SIZE="1x.demo" + + # Use bc for floating point comparison if available, otherwise use integer comparison + if command -v bc &>/dev/null; then + if [ $(echo "$MAX_NODE_CPU >= 7" | bc) -eq 1 ] && [ $(echo "$MAX_NODE_MEM >= 32" | bc) -eq 1 ]; then + LOKI_SIZE="1x.small" + print_info "Sufficient node resources, using size: $LOKI_SIZE" + elif [ $(echo "$MAX_NODE_CPU >= 5" | bc) -eq 1 ] && [ $(echo "$MAX_NODE_MEM >= 20" | bc) -eq 1 ]; then + LOKI_SIZE="1x.extra-small" + print_info "Moderate node resources, using size: $LOKI_SIZE" + else + print_warning "Limited node resources detected" + print_warning "Using minimal size: $LOKI_SIZE (single replica mode)" + print_warning "Note: Each node has only ${MAX_NODE_CPU} CPU, ${MAX_NODE_MEM}Gi" + fi + else + # Fallback to integer comparison + MAX_CPU_INT=${MAX_NODE_CPU%.*} + MAX_MEM_INT=${MAX_NODE_MEM%.*} + if [ "$MAX_CPU_INT" -ge 7 ] && [ "$MAX_MEM_INT" -ge 32 ]; then + LOKI_SIZE="1x.small" + print_info "Sufficient node resources, using size: $LOKI_SIZE" + else + print_warning "Limited node resources detected" + print_warning "Using minimal size: $LOKI_SIZE (single replica mode)" + fi + fi + + print_info "Selected LokiStack size: $LOKI_SIZE" + + cat </dev/null)\" = \"True\" ]" \ + 600 15 || { + print_warning "LokiStack not fully ready yet, checking component status..." + oc get lokistack osde2e-loki -n "$NAMESPACE" -o jsonpath='{.status.components}' | jq '.' + } + + print_success "LokiStack deployed" +fi + +# Verify Loki components +echo "" +print_info "Verifying Loki components..." +oc get lokistack osde2e-loki -n "$NAMESPACE" +echo "" +print_info "Loki Pods:" +oc get pods -n "$NAMESPACE" | grep loki + +# Check critical components +echo "" +print_info "Checking critical components status..." 
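+# Count only Running pods per component: a label selector picks the component,
+# while a field selector (status.phase=Running) filters out Pending/Failed pods.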
+RUNNING_INGESTER=$(oc get pods -n "$NAMESPACE" -l app.kubernetes.io/component=ingester --field-selector=status.phase=Running -o name 2>/dev/null | wc -l | tr -d ' ') +RUNNING_DISTRIBUTOR=$(oc get pods -n "$NAMESPACE" -l app.kubernetes.io/component=distributor --field-selector=status.phase=Running -o name 2>/dev/null | wc -l | tr -d ' ') +RUNNING_GATEWAY=$(oc get pods -n "$NAMESPACE" -l app.kubernetes.io/component=lokistack-gateway --field-selector=status.phase=Running -o name 2>/dev/null | wc -l | tr -d ' ') + +if [ "$RUNNING_INGESTER" -ge 1 ] && [ "$RUNNING_DISTRIBUTOR" -ge 1 ] && [ "$RUNNING_GATEWAY" -ge 1 ]; then + print_success "All critical Loki components are running" + print_info " Ingester: $RUNNING_INGESTER, Distributor: $RUNNING_DISTRIBUTOR, Gateway: $RUNNING_GATEWAY" +else + print_warning "Some Loki components may still be starting or Pending" + print_warning " Ingester: $RUNNING_INGESTER, Distributor: $RUNNING_DISTRIBUTOR, Gateway: $RUNNING_GATEWAY" + print_info "Check pods with: oc get pods -n $NAMESPACE | grep loki" + print_info "For Pending pods, check: oc describe pod -n $NAMESPACE" +fi + +# ========================================== +# Step 7: Configure ClusterLogForwarder +# ========================================== + +print_step "Step 7: Configuring ClusterLogForwarder" + +# Check if Cluster Logging Operator is installed (check CSV, not just namespace) +LOGGING_INSTALLED=false +if oc get csv -n openshift-logging 2>/dev/null | grep -q 'cluster-logging.*Succeeded'; then + print_success "Cluster Logging Operator already installed" + LOGGING_INSTALLED=true +elif oc get namespace openshift-logging &>/dev/null && oc get subscription cluster-logging -n openshift-logging &>/dev/null; then + print_info "Cluster Logging Operator subscription exists, checking status..." + # Wait for it to be ready + if wait_for_condition \ + "Cluster Logging Operator ready" \ + "oc get csv -n openshift-logging | grep -q 'cluster-logging.*Succeeded'" \ + 120 10; then + LOGGING_INSTALLED=true + fi +fi + +if [ "$LOGGING_INSTALLED" = "false" ]; then + print_warning "Cluster Logging Operator not installed" + print_info "ClusterLogForwarder requires Cluster Logging Operator" + + if [ "$SKIP_CONFIRMATION" != "true" ]; then + read -p "Install Cluster Logging Operator? [y/N] " -n 1 -r + echo "" + else + REPLY="y" # Auto-yes if SKIP_CONFIRMATION=true + fi + + if [[ $REPLY =~ ^[Yy]$ ]]; then + oc create namespace openshift-logging || true + + # Create OperatorGroup first (required for operator installation) + print_info "Creating OperatorGroup..." + cat </dev/null && [ \"\$(oc get crd clusterlogforwarders.observability.openshift.io -o jsonpath='{.status.conditions[?(@.type==\"Established\")].status}')\" = \"True\" ]" \ + 60 5 + + print_success "Cluster Logging Operator installed" + LOGGING_INSTALLED=true + else + print_warning "Skipping ClusterLogForwarder setup" + print_warning "Logs will not be forwarded to Loki automatically" + fi +fi + +# Create ClusterLogForwarder +if [ "$LOGGING_INSTALLED" = "true" ]; then + print_info "Creating ClusterLogForwarder..." + + cat </dev/null; then + print_success " Installed" +else + print_error " Not found" +fi + +# Tekton Results +echo "" +echo "2. Tekton Results:" +RESULTS_PODS=$(oc get pods -n openshift-pipelines --no-headers 2>/dev/null | grep -c 'tekton-results.*Running' || echo 0) +if [ "$RESULTS_PODS" -ge 3 ]; then + print_success " Running ($RESULTS_PODS pods)" +else + print_error " Not ready ($RESULTS_PODS pods running)" +fi + +# Loki Operator +echo "" +echo "3. 
Loki Operator:" +# Check both possible namespaces +if oc get csv -n openshift-operators-redhat 2>/dev/null | grep -q loki-operator.*Succeeded; then + print_success " Installed (openshift-operators-redhat)" +elif oc get csv -n openshift-operators 2>/dev/null | grep -q loki-operator.*Succeeded; then + print_success " Installed (openshift-operators)" +else + print_error " Not installed" +fi + +# LokiStack +echo "" +echo "4. LokiStack:" +if oc get lokistack osde2e-loki -n "$NAMESPACE" &>/dev/null; then + LOKI_STATUS=$(oc get lokistack osde2e-loki -n "$NAMESPACE" -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 2>/dev/null || echo "Unknown") + if [ "$LOKI_STATUS" = "True" ]; then + print_success " Ready" + else + print_warning " Status: $LOKI_STATUS" + fi +else + print_error " Not found" +fi + +# ClusterLogForwarder +echo "" +echo "5. ClusterLogForwarder:" +if oc get clusterlogforwarder -n openshift-logging &>/dev/null; then + print_success " Configured" +else + print_warning " Not configured" +fi + +# Tekton Resources +echo "" +echo "6. Tekton Resources in $NAMESPACE:" +TASK_COUNT=$(oc get task -n "$NAMESPACE" --no-headers 2>/dev/null | wc -l | tr -d ' ') +PIPELINE_COUNT=$(oc get pipeline -n "$NAMESPACE" --no-headers 2>/dev/null | wc -l | tr -d ' ') +print_success " $TASK_COUNT Task(s), $PIPELINE_COUNT Pipeline(s)" + +# ========================================== +# Completion +# ========================================== + +print_header "Setup Complete!" + +echo -e "${GREEN}✅ All components installed successfully!${NC}" +echo "" +echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo -e "${CYAN}Next Steps${NC}" +echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo "" +echo "1. Create credentials secret:" +echo " ${YELLOW}oc create secret generic osde2e-credentials \\${NC}" +echo " ${YELLOW}--from-literal=OCM_CLIENT_ID=... \\${NC}" +echo " ${YELLOW}--from-literal=OCM_CLIENT_SECRET=... \\${NC}" +echo " ${YELLOW}--from-literal=AWS_ACCESS_KEY_ID=... \\${NC}" +echo " ${YELLOW}--from-literal=AWS_SECRET_ACCESS_KEY=... \\${NC}" +echo " ${YELLOW}-n $NAMESPACE${NC}" +echo "" +echo "2. Run a test:" +echo " ${YELLOW}./run-with-credentials.sh ${NC}" +echo "" +echo "3. View logs:" +echo " ${YELLOW}opc pipelinerun logs -n $NAMESPACE${NC}" +echo "" +echo "4. Query Tekton Results:" +echo " ${YELLOW}./tekton-results-api.sh query${NC}" +echo "" +echo "5. 
Access S3 test results (after pipeline completes):" +echo " ${YELLOW}# Check upload-results-to-s3 task logs for pre-signed URLs${NC}" +echo " ${YELLOW}oc logs -upload-results-to-s3-pod -n $NAMESPACE${NC}" +echo "" +echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo "" +echo -e "${MAGENTA}📚 Documentation:${NC}" +echo " • SETUP-FROM-SCRATCH.md - Manual setup guide" +echo " • QUICK-START-GUIDE.md - Running tests" +echo " • S3-RESULTS-UPLOAD.md - S3 result storage guide" +echo " • TEKTON-RESULTS-README.md - Results API" +echo " • OPC-CLI-SETUP.md - Install opc CLI" +echo " • FIX-S3-PERMISSIONS.md - S3 IAM permissions guide" +echo "" +echo -e "${YELLOW}⚠️ Important Notes:${NC}" +echo "" +echo "• S3 Test Results: Pipeline automatically uploads test logs/reports to S3" +echo "• Pre-signed URLs: Valid for 7 days, can be accessed directly in browser" +echo "• Loki S3 Storage: Real-time logs stored in binary chunks (query via Loki API)" +echo "• S3 Permissions: Ensure IAM user has s3:PutObject, s3:GetObject, s3:DeleteObject, s3:ListBucket" +echo "• Resource Requirements: LokiStack ingester requires ~2 CPU + 8Gi RAM" +echo "• If Loki pods are Pending, check: oc describe pod -n $NAMESPACE" +echo "" +echo -e "${GREEN}🎉 Happy testing!${NC}" +echo "" + diff --git a/test/e2e/tekton-results-api.sh b/test/e2e/tekton-results-api.sh new file mode 100755 index 0000000..73dbe27 --- /dev/null +++ b/test/e2e/tekton-results-api.sh @@ -0,0 +1,398 @@ +#!/bin/bash +# Tekton Results API Management Script + +set -euo pipefail + +# Color definitions +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +NAMESPACE="openshift-pipelines" +SERVICE="tekton-results-api-service" +PORT="8080" +SA_NAME="tekton-results-reader" +PID_FILE="/tmp/tekton-results-pf.pid" +LOG_FILE="/tmp/tekton-results-pf.log" + +# Display usage help +usage() { + cat << EOF +${BLUE}Tekton Results API Management Tool${NC} + +Usage: + $0 [command] + +Commands: + start Start port-forward + stop Stop port-forward + restart Restart port-forward + status Check port-forward status + query Query Results (requires port-forward running) + test Test API connection + setup Set up ServiceAccount and RBAC + cleanup Clean up all port-forward processes + +Examples: + $0 setup # Set up RBAC (run once) + $0 start # Start port-forward + $0 query # Query all Results + $0 test # Test connection + +EOF +} + +# Check if port-forward process is running +check_portforward() { + if [ -f "$PID_FILE" ]; then + local pid=$(cat "$PID_FILE") + if ps -p "$pid" > /dev/null 2>&1; then + return 0 + fi + fi + # Fallback: check if port is listening + lsof -i :$PORT > /dev/null 2>&1 +} + +# Get port-forward PID +get_pf_pid() { + if [ -f "$PID_FILE" ]; then + cat "$PID_FILE" + else + lsof -ti :$PORT 2>/dev/null || echo "" + fi +} + +# Set up ServiceAccount and RBAC +setup_rbac() { + echo -e "${BLUE}=== Setting up Tekton Results RBAC ===${NC}" + echo "" + + SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + RBAC_FILE="$SCRIPT_DIR/tekton-results-reader.yaml" + + if [ -f "$RBAC_FILE" ]; then + echo -e "${YELLOW}Applying $RBAC_FILE...${NC}" + oc apply -f "$RBAC_FILE" + echo -e "${GREEN}✅ RBAC configured${NC}" + else + echo -e "${YELLOW}RBAC file not found, creating resources manually...${NC}" + + # Create ServiceAccount + if ! 
oc get serviceaccount "$SA_NAME" -n "$NAMESPACE" >/dev/null 2>&1; then + echo -e "${YELLOW} -> Creating ServiceAccount${NC}" + oc create serviceaccount "$SA_NAME" -n "$NAMESPACE" + else + echo -e "${GREEN} ✓ ServiceAccount exists${NC}" + fi + + # Create ClusterRole + if ! oc get clusterrole "$SA_NAME" >/dev/null 2>&1; then + echo -e "${YELLOW} -> Creating ClusterRole${NC}" + cat </dev/null 2>&1; then + echo -e "${YELLOW} -> Creating ClusterRoleBinding${NC}" + oc create clusterrolebinding "${SA_NAME}-binding" \ + --clusterrole="$SA_NAME" \ + --serviceaccount="${NAMESPACE}:${SA_NAME}" + else + echo -e "${GREEN} ✓ ClusterRoleBinding exists${NC}" + fi + + echo -e "${GREEN}✅ RBAC configured${NC}" + fi +} + +# Start port-forward +start_portforward() { + echo -e "${YELLOW}Starting port-forward...${NC}" + + # Check if already running + if check_portforward; then + local pid=$(get_pf_pid) + echo -e "${YELLOW}Warning: Port-forward already running (PID: $pid)${NC}" + echo -e "${YELLOW}Use '$0 restart' to restart${NC}" + return 0 + fi + + # Check if service exists + if ! oc get svc "$SERVICE" -n "$NAMESPACE" >/dev/null 2>&1; then + echo -e "${RED}❌ Service '$SERVICE' not found in namespace '$NAMESPACE'${NC}" + echo -e "${YELLOW}Tekton Results may not be enabled. Check:${NC}" + echo " oc get tektonconfig config -o jsonpath='{.spec.result.disabled}'" + return 1 + fi + + # Start port-forward in background + nohup oc port-forward -n "$NAMESPACE" svc/"$SERVICE" "$PORT":"$PORT" > "$LOG_FILE" 2>&1 & + local pf_pid=$! + echo "$pf_pid" > "$PID_FILE" + + # Wait and verify + echo "Waiting for connection..." + sleep 3 + + if ps -p "$pf_pid" > /dev/null 2>&1; then + # Test if port is actually listening + if curl -sk --connect-timeout 2 https://localhost:$PORT >/dev/null 2>&1 || lsof -i :$PORT >/dev/null 2>&1; then + echo -e "${GREEN}✅ Port-forward started (PID: $pf_pid)${NC}" + echo -e "${GREEN} Listening on: localhost:$PORT${NC}" + echo "" + echo -e "Use '${BLUE}$0 query${NC}' to query Results" + return 0 + fi + fi + + # Failed - show log + echo -e "${RED}❌ Port-forward failed to start${NC}" + echo "" + echo "Log output:" + cat "$LOG_FILE" 2>/dev/null || echo "(no log)" + rm -f "$PID_FILE" + return 1 +} + +# Stop port-forward +stop_portforward() { + echo -e "${YELLOW}Stopping port-forward...${NC}" + + local pid=$(get_pf_pid) + if [ -z "$pid" ]; then + echo -e "${YELLOW}Warning: No running port-forward found${NC}" + rm -f "$PID_FILE" + return 0 + fi + + kill "$pid" 2>/dev/null || true + sleep 1 + + if ps -p "$pid" > /dev/null 2>&1; then + echo -e "${YELLOW}Force terminating...${NC}" + kill -9 "$pid" 2>/dev/null || true + fi + + rm -f "$PID_FILE" + echo -e "${GREEN}✅ Port-forward stopped${NC}" +} + +# Check status +check_status() { + echo -e "${BLUE}=== Port-Forward Status ===${NC}" + echo "" + + if check_portforward; then + local pid=$(get_pf_pid) + echo -e "${GREEN}✅ Port-forward is running${NC}" + echo " PID: $pid" + echo " Port: localhost:$PORT" + echo "" + + # Test connection + echo -e "${YELLOW}Testing API connection...${NC}" + if curl -k -s -f https://localhost:$PORT/healthz > /dev/null 2>&1; then + echo -e "${GREEN}✅ API is accessible${NC}" + else + echo -e "${YELLOW}Warning: API health check failed${NC}" + fi + else + echo -e "${RED}❌ Port-forward is not running${NC}" + echo "" + echo -e "Use '${BLUE}$0 start${NC}' to start" + fi +} + +# Test API +test_api() { + echo -e "${BLUE}=== Testing Tekton Results API ===${NC}" + echo "" + + # Check port-forward + if ! 
check_portforward; then + echo -e "${RED}❌ Port-forward is not running${NC}" + echo -e "Please run first: ${BLUE}$0 start${NC}" + return 1 + fi + + # Ensure RBAC is set up + setup_rbac + + # Get Token + echo "" + echo -e "${YELLOW}Getting ServiceAccount token...${NC}" + local token + token=$(oc create token "$SA_NAME" -n "$NAMESPACE" --duration=1h 2>&1) || { + echo -e "${RED}❌ Failed to get token${NC}" + echo "$token" + return 1 + } + + echo -e "${GREEN}✅ Token obtained (length: ${#token})${NC}" + echo "" + + # Test API + echo -e "${YELLOW}Querying API...${NC}" + local response + response=$(curl -k -s -H "Authorization: Bearer $token" \ + "https://localhost:$PORT/apis/results.tekton.dev/v1alpha2/parents/-/results" 2>&1) + + # Check response + if echo "$response" | jq -e '.code' > /dev/null 2>&1; then + local error_msg + error_msg=$(echo "$response" | jq -r '.message') + echo -e "${RED}❌ API error: $error_msg${NC}" + return 1 + fi + + local count + count=$(echo "$response" | jq '.results | length' 2>/dev/null || echo "0") + echo -e "${GREEN}✅ API is working${NC}" + echo -e "${GREEN} Found $count Results${NC}" +} + +# Query Results +query_results() { + echo -e "${BLUE}=== Querying Tekton Results ===${NC}" + echo "" + + # Check port-forward + if ! check_portforward; then + echo -e "${RED}❌ Port-forward is not running${NC}" + echo -e "Please run first: ${BLUE}$0 start${NC}" + return 1 + fi + + # Ensure RBAC is set up + setup_rbac + + # Get Token + echo "" + echo -e "${YELLOW}Getting token...${NC}" + local token + token=$(oc create token "$SA_NAME" -n "$NAMESPACE" --duration=1h 2>&1) || { + echo -e "${RED}❌ Failed to get token${NC}" + echo "$token" + return 1 + } + echo -e "${GREEN}✅ Token obtained${NC}" + echo "" + + # Query Results + echo -e "${YELLOW}Querying all Results...${NC}" + local results + results=$(curl -k -s -H "Authorization: Bearer $token" \ + "https://localhost:$PORT/apis/results.tekton.dev/v1alpha2/parents/-/results") + + # Check for errors + if echo "$results" | jq -e '.code' > /dev/null 2>&1; then + local error_msg + error_msg=$(echo "$results" | jq -r '.message') + echo -e "${RED}❌ Query failed: $error_msg${NC}" + return 1 + fi + + # Statistics + local total + total=$(echo "$results" | jq '.results | length') + echo -e "${GREEN}✅ Found $total Results${NC}" + echo "" + + # Display most recent 5 + echo -e "${BLUE}Most recent 5 Results:${NC}" + echo "$results" | jq -r '.results[0:5] | .[] | + " • \(.name | split("/")[2]) + Created: \(.created_time) + ID: \(.id)"' | sed 's/^/ /' + echo "" + + # Group by namespace + echo -e "${BLUE}Results by Namespace:${NC}" + echo "$results" | jq -r '.results | group_by(.name | split("/")[0]) | + .[] | " • \(.[0].name | split("/")[0]): \(length)"' +} + +# Clean up all port-forwards +cleanup_all() { + echo -e "${YELLOW}Cleaning up all Tekton Results port-forwards...${NC}" + + # Find all related processes + local pids + pids=$(pgrep -f "port-forward.*tekton-results" 2>/dev/null || echo "") + + if [ -z "$pids" ]; then + echo -e "${YELLOW}Warning: No related processes found${NC}" + rm -f "$PID_FILE" + return 0 + fi + + # Kill all processes + for pid in $pids; do + echo -e "${YELLOW} Terminating process $pid...${NC}" + kill "$pid" 2>/dev/null || true + done + + sleep 1 + rm -f "$PID_FILE" + echo -e "${GREEN}✅ Cleanup complete${NC}" +} + +# Main logic +case "${1:-}" in + start) + start_portforward + ;; + stop) + stop_portforward + ;; + restart) + stop_portforward + sleep 1 + start_portforward + ;; + status) + check_status + ;; + test) + test_api + ;; 
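+    # Both 'test' and 'query' call the same Results list endpoint:
+    #   GET /apis/results.tekton.dev/v1alpha2/parents/-/results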
+ query) + query_results + ;; + setup) + setup_rbac + ;; + cleanup) + cleanup_all + ;; + -h|--help|help|"") + usage + ;; + *) + echo -e "${RED}❌ Unknown command: $1${NC}" + echo "" + usage + exit 1 + ;; +esac diff --git a/test/e2e/tekton-results-reader.yaml b/test/e2e/tekton-results-reader.yaml new file mode 100644 index 0000000..3a0b821 --- /dev/null +++ b/test/e2e/tekton-results-reader.yaml @@ -0,0 +1,38 @@ +--- +# ServiceAccount for querying Tekton Results API +apiVersion: v1 +kind: ServiceAccount +metadata: + name: tekton-results-reader + namespace: openshift-pipelines +--- +# ClusterRole for reading Tekton Results +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: tekton-results-reader +rules: +- apiGroups: + - results.tekton.dev + resources: + - results + - records + - logs + verbs: + - get + - list + - watch +--- +# Bind the ClusterRole to the ServiceAccount +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: tekton-results-reader-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-results-reader +subjects: +- kind: ServiceAccount + name: tekton-results-reader + namespace: openshift-pipelines diff --git a/test/e2e/upload-to-s3-task.yml b/test/e2e/upload-to-s3-task.yml new file mode 100644 index 0000000..6c720c5 --- /dev/null +++ b/test/e2e/upload-to-s3-task.yml @@ -0,0 +1,158 @@ +# Task: Upload Test Results to S3 +# This task uploads test logs and reports to S3 for long-term storage and URL access +apiVersion: tekton.dev/v1 +kind: Task +metadata: + name: upload-to-s3-task + labels: + app.kubernetes.io/version: "0.1" + annotations: + tekton.dev/pipelines.minVersion: "0.17.0" + tekton.dev/categories: Storage + tekton.dev/tags: s3,upload,results + tekton.dev/displayName: "Upload Results to S3" + tekton.dev/platforms: "linux/amd64" +spec: + description: >- + Upload test results (logs, JUnit XML, reports) to S3 bucket for long-term + storage. Generates pre-signed URLs for easy browser access. 
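+  # Wiring sketch (see osde2e-pipeline.yml): this Task runs in the Pipeline's
+  # 'finally' block, roughly as:
+  #   taskRef: { name: upload-to-s3-task }
+  #   params:
+  #     - { name: PIPELINE_RUN_NAME, value: "$(context.pipelineRun.name)" }
+  #     - { name: TEST_STATUS, value: "$(tasks.osde2e-test.results.test-status)" }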
+ + params: + - name: S3_BUCKET + type: string + description: S3 bucket name for storing test results + default: "osde2e-loki-logs" + - name: PIPELINE_RUN_NAME + type: string + description: Name of the PipelineRun (used for S3 path) + - name: AWS_REGION + type: string + description: AWS region for S3 bucket + default: "us-east-1" + - name: TEST_STATUS + type: string + description: Test status (PASS/FAIL) + default: "UNKNOWN" + - name: OSDE2E_CONFIGS + type: string + description: Test configuration + default: "" + + workspaces: + - name: test-results + description: Workspace containing test results to upload + mountPath: /workspace/test-results + + results: + - name: s3-path + description: S3 path where results are stored + - name: upload-status + description: Upload status (SUCCESS/FAILED) + + steps: + - name: upload-to-s3 + image: amazon/aws-cli:2.15.0 + env: + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: loki-s3-credentials + key: access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: loki-s3-credentials + key: access_key_secret + - name: AWS_DEFAULT_REGION + value: "$(params.AWS_REGION)" + script: | + #!/bin/bash + set -euo pipefail + + S3_BUCKET="$(params.S3_BUCKET)" + PIPELINE_RUN="$(params.PIPELINE_RUN_NAME)" + + DATE_PREFIX=$(date +%Y-%m-%d) + TIMESTAMP=$(date +%Y%m%d-%H%M%S) + S3_PREFIX="test-results/${DATE_PREFIX}/${PIPELINE_RUN}-${TIMESTAMP}" + + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "📤 Uploading Test Results to S3" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "Bucket: ${S3_BUCKET}" + echo "Prefix: ${S3_PREFIX}" + echo "Test Status: $(params.TEST_STATUS)" + echo "" + + # Upload all files + UPLOAD_STATUS="SUCCESS" + if aws s3 cp /workspace/test-results/ "s3://${S3_BUCKET}/${S3_PREFIX}/" --recursive 2>&1; then + echo "✅ Upload completed" + else + echo "⚠️ Upload had issues" + UPLOAD_STATUS="PARTIAL" + fi + + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "📁 Uploaded Files" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + aws s3 ls "s3://${S3_BUCKET}/${S3_PREFIX}/" --recursive 2>/dev/null | head -20 + + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "🔗 Access URLs" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "" + echo "S3 URI: s3://${S3_BUCKET}/${S3_PREFIX}/" + echo "" + echo "Download command:" + echo " aws s3 cp s3://${S3_BUCKET}/${S3_PREFIX}/ ./results/ --recursive" + echo "" + + # Generate presigned URLs (valid 7 days) + echo "Pre-signed URLs (valid 7 days):" + + if aws s3 ls "s3://${S3_BUCKET}/${S3_PREFIX}/logs/osde2e-full.log" 2>/dev/null; then + echo "" + echo "📄 osde2e-full.log:" + aws s3 presign "s3://${S3_BUCKET}/${S3_PREFIX}/logs/osde2e-full.log" --expires-in 604800 + fi + + if aws s3 ls "s3://${S3_BUCKET}/${S3_PREFIX}/logs/consolidated.log" 2>/dev/null; then + echo "" + echo "📄 consolidated.log:" + aws s3 presign "s3://${S3_BUCKET}/${S3_PREFIX}/logs/consolidated.log" --expires-in 604800 + fi + + # Find and presign first XML file + FIRST_XML=$(aws s3 ls "s3://${S3_BUCKET}/${S3_PREFIX}/" --recursive 2>/dev/null | grep "\.xml$" | head -1 | awk '{print $4}') + if [ -n "$FIRST_XML" ]; then + echo "" + echo "📄 $(basename $FIRST_XML):" + aws s3 presign "s3://${S3_BUCKET}/$FIRST_XML" --expires-in 604800 + fi + + echo "" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + echo "📋 Final Report" + echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" + 
echo "Test Status: $(params.TEST_STATUS)" + echo "Config: $(params.OSDE2E_CONFIGS)" + echo "S3 Path: s3://${S3_BUCKET}/${S3_PREFIX}/" + echo "Upload Status: ${UPLOAD_STATUS}" + echo "" + + # Write results + echo -n "s3://${S3_BUCKET}/${S3_PREFIX}/" > $(results.s3-path.path) + echo -n "${UPLOAD_STATUS}" > $(results.upload-status.path) + + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + diff --git a/test/e2e/view-pipeline-logs.sh b/test/e2e/view-pipeline-logs.sh new file mode 100755 index 0000000..47e041e --- /dev/null +++ b/test/e2e/view-pipeline-logs.sh @@ -0,0 +1,349 @@ +#!/bin/bash + +# Tekton Pipeline Log Viewer +# View logs from running and completed Pipelines + +set -euo pipefail + +# Color definitions +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +NC='\033[0m' # No Color + +NAMESPACE="${NAMESPACE:-osde2e-tekton}" + +# Display usage +usage() { + cat < [options] + +Options: + -n, --namespace Specify namespace (default: osde2e-tekton) + -s, --source Log source: + - pods: Read from running Pods (default) + - workspace: Read from Workspace PVC + - results: Read from Tekton Results + - all: Try all sources + -e, --export Export logs to file + -h, --help Show this help + +Examples: + # View latest PipelineRun logs + $0 latest + + # View specific PipelineRun + $0 osde2e-osd-example-operator-latest-h0haqru + + # Read from Workspace PVC (when Pod is deleted) + $0 osde2e-osd-example-operator-latest-h0haqru --source workspace + + # Export logs to file + $0 osde2e-osd-example-operator-latest-h0haqru --export logs.txt + +EOF + exit 1 +} + +# Parse arguments +PIPELINERUN_NAME="" +SOURCE="pods" +EXPORT_FILE="" + +while [[ $# -gt 0 ]]; do + case $1 in + -n|--namespace) + NAMESPACE="$2" + shift 2 + ;; + -s|--source) + SOURCE="$2" + shift 2 + ;; + -e|--export) + EXPORT_FILE="$2" + shift 2 + ;; + -h|--help) + usage + ;; + *) + if [ -z "$PIPELINERUN_NAME" ]; then + PIPELINERUN_NAME="$1" + else + echo -e "${RED}Error: Unknown argument: $1${NC}" + usage + fi + shift + ;; + esac +done + +# Check if PipelineRun name was provided +if [ -z "$PIPELINERUN_NAME" ]; then + echo -e "${RED}Error: Please provide PipelineRun name${NC}" + usage +fi + +# If "latest", get the most recent PipelineRun +if [ "$PIPELINERUN_NAME" = "latest" ]; then + echo -e "${CYAN}Finding latest PipelineRun...${NC}" + PIPELINERUN_NAME=$(oc get pipelinerun -n "$NAMESPACE" --sort-by=.metadata.creationTimestamp -o jsonpath='{.items[-1].metadata.name}') + if [ -z "$PIPELINERUN_NAME" ]; then + echo -e "${RED}Error: No PipelineRun found${NC}" + exit 1 + fi + echo -e "${GREEN}Found: $PIPELINERUN_NAME${NC}" +fi + +# Check if PipelineRun exists +if ! 
oc get pipelinerun "$PIPELINERUN_NAME" -n "$NAMESPACE" &>/dev/null; then + echo -e "${RED}Error: PipelineRun '$PIPELINERUN_NAME' does not exist${NC}" + exit 1 +fi + +# Get PipelineRun info +echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo -e "${CYAN}PipelineRun Information${NC}" +echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + +PIPELINE_INFO=$(oc get pipelinerun "$PIPELINERUN_NAME" -n "$NAMESPACE" -o json) +STATUS=$(echo "$PIPELINE_INFO" | jq -r '.status.conditions[0].status // "Unknown"') +REASON=$(echo "$PIPELINE_INFO" | jq -r '.status.conditions[0].reason // "Unknown"') +START_TIME=$(echo "$PIPELINE_INFO" | jq -r '.status.startTime // "N/A"') +COMPLETION_TIME=$(echo "$PIPELINE_INFO" | jq -r '.status.completionTime // "N/A"') + +echo "Name: $PIPELINERUN_NAME" +echo "Namespace: $NAMESPACE" +echo "Status: $STATUS" +echo "Reason: $REASON" +echo "Started: $START_TIME" +echo "Completed: $COMPLETION_TIME" +echo "" + +# Check Pod status +PODS=$(oc get pods -n "$NAMESPACE" -l tekton.dev/pipelineRun="$PIPELINERUN_NAME" -o jsonpath='{.items[*].metadata.name}') +if [ -n "$PODS" ]; then + echo -e "${GREEN}Found Pods: $(echo "$PODS" | wc -w)${NC}" + for POD in $PODS; do + POD_STATUS=$(oc get pod "$POD" -n "$NAMESPACE" -o jsonpath='{.status.phase}') + NODE=$(oc get pod "$POD" -n "$NAMESPACE" -o jsonpath='{.spec.nodeName}') + echo " - $POD (status: $POD_STATUS, node: $NODE)" + done +else + echo -e "${YELLOW}Warning: No running Pods found (may have been cleaned up)${NC}" +fi +echo "" + +# Read logs from Pods +read_logs_from_pods() { + echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${CYAN}Reading logs from Pods${NC}" + echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + + if [ -z "$PODS" ]; then + echo -e "${RED}Error: No Pods available${NC}" + return 1 + fi + + # Try using opc + if command -v opc &>/dev/null; then + echo -e "${CYAN}Using opc CLI...${NC}" + opc pipelinerun logs "$PIPELINERUN_NAME" -n "$NAMESPACE" + else + echo -e "${CYAN}Using oc logs...${NC}" + for POD in $PODS; do + echo "" + echo -e "${YELLOW}=== $POD ===${NC}" + oc logs "$POD" -n "$NAMESPACE" --all-containers=true --prefix=true + done + fi +} + +read_logs_from_workspace() { + echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${CYAN}Reading logs from Workspace PVC${NC}" + echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + + # Extract PipelineRun short ID + RUN_ID=$(echo "$PIPELINERUN_NAME" | rev | cut -d'-' -f1 | rev) + PVC_NAME="osde2e-test-workspace-$RUN_ID" + + echo "Looking for PVC: $PVC_NAME" + + if ! oc get pvc "$PVC_NAME" -n "$NAMESPACE" &>/dev/null; then + echo -e "${RED}Error: PVC '$PVC_NAME' does not exist${NC}" + return 1 + fi + + echo -e "${GREEN}Found PVC: $PVC_NAME${NC}" + echo "Creating temporary debug Pod..." + + # Create temporary Pod + DEBUG_POD="debug-viewer-$(date +%s)" + cat </dev/null +apiVersion: v1 +kind: Pod +metadata: + name: $DEBUG_POD + namespace: $NAMESPACE +spec: + containers: + - name: viewer + image: registry.access.redhat.com/ubi8/ubi-minimal + command: ["sleep", "300"] + volumeMounts: + - name: workspace + mountPath: /workspace + volumes: + - name: workspace + persistentVolumeClaim: + claimName: $PVC_NAME + restartPolicy: Never +EOF + + # Wait for Pod to be ready + echo "Waiting for Pod to be ready..." + if ! 
oc wait --for=condition=Ready pod/"$DEBUG_POD" -n "$NAMESPACE" --timeout=60s &>/dev/null; then + echo -e "${RED}Error: Pod failed to start${NC}" + oc delete pod "$DEBUG_POD" -n "$NAMESPACE" &>/dev/null || true + return 1 + fi + + echo -e "${GREEN}Debug Pod ready${NC}" + echo "" + + # List available log files + echo -e "${CYAN}Available log files:${NC}" + oc exec "$DEBUG_POD" -n "$NAMESPACE" -- find /workspace/test-results/logs -type f 2>/dev/null || true + echo "" + + # Read consolidated log + if oc exec "$DEBUG_POD" -n "$NAMESPACE" -- test -f /workspace/test-results/logs/consolidated.log 2>/dev/null; then + echo -e "${CYAN}=== Consolidated Log (consolidated.log) ===${NC}" + oc exec "$DEBUG_POD" -n "$NAMESPACE" -- cat /workspace/test-results/logs/consolidated.log + else + echo -e "${YELLOW}Warning: consolidated.log not found, showing all available logs:${NC}" + oc exec "$DEBUG_POD" -n "$NAMESPACE" -- sh -c 'for log in /workspace/test-results/logs/*.log; do echo ""; echo "=== $(basename $log) ==="; cat "$log"; done' + fi + + # Cleanup + echo "" + echo "Cleaning up temporary Pod..." + oc delete pod "$DEBUG_POD" -n "$NAMESPACE" &>/dev/null || true +} + +read_logs_from_results() { + echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${CYAN}Reading from Tekton Results${NC}" + echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + + # Get TaskRuns + TASKRUNS=$(oc get taskrun -n "$NAMESPACE" -l tekton.dev/pipelineRun="$PIPELINERUN_NAME" -o jsonpath='{.items[*].metadata.name}') + + if [ -z "$TASKRUNS" ]; then + echo -e "${RED}Error: No TaskRuns found${NC}" + return 1 + fi + + for TASKRUN in $TASKRUNS; do + echo "" + echo -e "${YELLOW}=== TaskRun: $TASKRUN ===${NC}" + + # Get Results + RESULTS=$(oc get taskrun "$TASKRUN" -n "$NAMESPACE" -o json | jq -r '.status.taskResults // []') + + if [ "$RESULTS" = "[]" ] || [ "$RESULTS" = "null" ]; then + echo "No Results available" + continue + fi + + echo "$RESULTS" | jq -r '.[] | "\(.name): \(.value)"' + done + + # Show PipelineRun Results + echo "" + echo -e "${YELLOW}=== PipelineRun Results ===${NC}" + PR_RESULTS=$(echo "$PIPELINE_INFO" | jq -r '.status.pipelineResults // []') + + if [ "$PR_RESULTS" = "[]" ] || [ "$PR_RESULTS" = "null" ]; then + echo "No Results available" + else + echo "$PR_RESULTS" | jq -r '.[] | "\(.name): \(.value)"' + fi +} + +# Execute log reading +OUTPUT="" +case "$SOURCE" in + pods) + OUTPUT=$(read_logs_from_pods 2>&1) || true + echo "$OUTPUT" + ;; + workspace) + OUTPUT=$(read_logs_from_workspace 2>&1) || true + echo "$OUTPUT" + ;; + results) + OUTPUT=$(read_logs_from_results 2>&1) || true + echo "$OUTPUT" + ;; + all) + echo -e "${CYAN}Trying all sources...${NC}" + echo "" + + OUTPUT+="=== Source: Pods ===\n" + OUTPUT+=$(read_logs_from_pods 2>&1 || echo "Unable to read from Pods") + OUTPUT+="\n\n" + + OUTPUT+="=== Source: Workspace PVC ===\n" + OUTPUT+=$(read_logs_from_workspace 2>&1 || echo "Unable to read from Workspace") + OUTPUT+="\n\n" + + OUTPUT+="=== Source: Tekton Results ===\n" + OUTPUT+=$(read_logs_from_results 2>&1 || echo "Unable to read from Results") + + echo -e "$OUTPUT" + ;; + *) + echo -e "${RED}Error: Unknown log source: $SOURCE${NC}" + usage + ;; +esac + +# Export logs to file +if [ -n "$EXPORT_FILE" ]; then + echo "" + echo -e "${CYAN}Exporting logs to file: $EXPORT_FILE${NC}" + + if [ -n "$OUTPUT" ]; then + echo -e "$OUTPUT" > "$EXPORT_FILE" + else + # Re-read and export + case "$SOURCE" in + pods) + read_logs_from_pods > "$EXPORT_FILE" 
2>&1 || true + ;; + workspace) + read_logs_from_workspace > "$EXPORT_FILE" 2>&1 || true + ;; + results) + read_logs_from_results > "$EXPORT_FILE" 2>&1 || true + ;; + esac + fi + + echo -e "${GREEN}Logs exported${NC}" + echo "File: $EXPORT_FILE" + echo "Size: $(du -h "$EXPORT_FILE" | cut -f1)" +fi + +echo "" +echo -e "${GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" +echo -e "${GREEN}Done${NC}" +echo -e "${GREEN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" From 1d7b91e68190d9a50896057f86c5ea123c0d51eb Mon Sep 17 00:00:00 2001 From: YiqinZhang Date: Tue, 9 Dec 2025 23:47:00 -0500 Subject: [PATCH 13/13] update worksapace --- test/e2e/doc/MANUAL-SETUP-GUIDE.md | 12 +- test/e2e/doc/REQUIRED-CONFIG-FILES.md | 196 ++++++++------------------ test/e2e/doc/STORAGE-GUIDE.md | 52 +++++-- test/e2e/e2e-tekton-template.yml | 34 ++--- test/e2e/osde2e-pipeline.yml | 12 +- test/e2e/osde2e-tekton-task.yml | 157 +++++++++++---------- test/e2e/upload-to-s3-task.yml | 49 +++++-- test/e2e/view-pipeline-logs.sh | 27 ++-- 8 files changed, 260 insertions(+), 279 deletions(-) diff --git a/test/e2e/doc/MANUAL-SETUP-GUIDE.md b/test/e2e/doc/MANUAL-SETUP-GUIDE.md index 93131d9..46f321d 100644 --- a/test/e2e/doc/MANUAL-SETUP-GUIDE.md +++ b/test/e2e/doc/MANUAL-SETUP-GUIDE.md @@ -731,12 +731,12 @@ oc run pvc-reader --rm -it --restart=Never \ \"stdin\": true, \"tty\": true, \"volumeMounts\": [{ - \"name\": \"test-results\", + \"name\": \"workspace\", \"mountPath\": \"/workspace\" }] }], \"volumes\": [{ - \"name\": \"test-results\", + \"name\": \"workspace\", \"persistentVolumeClaim\": { \"claimName\": \"$PVC_NAME\" } @@ -745,10 +745,10 @@ oc run pvc-reader --rm -it --restart=Never \ }" \ -n osde2e-tekton -# Inside pod: -# ls /workspace/ -# cat /workspace/logs/osde2e-full.log -# cat /workspace/reports/test_output.log +# Inside pod (Prow-compatible paths): +# ls /workspace/artifacts/ +# cat /workspace/artifacts/logs/osde2e-full.log +# cat /workspace/artifacts/test_output.log ``` ### 11.5 Query Tekton Results API diff --git a/test/e2e/doc/REQUIRED-CONFIG-FILES.md b/test/e2e/doc/REQUIRED-CONFIG-FILES.md index c076880..9fffea1 100644 --- a/test/e2e/doc/REQUIRED-CONFIG-FILES.md +++ b/test/e2e/doc/REQUIRED-CONFIG-FILES.md @@ -26,6 +26,16 @@ Complete list of configuration files needed for manual OSDE2E Tekton Pipeline se - Captures JUnit XML results - Stores logs in workspace PVC - Produces structured results for Tekton Results +- Uses single workspace with subdirectories (Prow-compatible) + +**Workspace Structure:** +``` +workspace/ +├── artifacts/ # ARTIFACTS - JUnit XML, reports, logs +│ ├── junit/ # JUnit XML results +│ └── logs/ # Test logs +└── shared/ # SHARED_DIR - Data shared between steps +``` **Apply Command:** ```bash @@ -61,33 +71,10 @@ spec: - name: IMAGE_TAG type: string default: "latest" - - name: OCM_CLIENT_ID - type: string - default: "" - - name: OCM_CLIENT_SECRET - type: string - default: "" - - name: AWS_ACCESS_KEY_ID - type: string - default: "" - - name: AWS_SECRET_ACCESS_KEY - type: string - default: "" - - name: CLOUD_PROVIDER_REGION - type: string - default: "us-east-1" - - name: LOG_BUCKET - type: string - default: "osde2e-logs" - - name: USE_EXISTING_CLUSTER - type: string - default: "TRUE" - name: CLUSTER_ID type: string default: "" - - name: CAD_PAGERDUTY_ROUTING_KEY - type: string - default: "" + # ... 
other params results: - name: test-results @@ -100,63 +87,43 @@ spec: description: Test execution summary workspaces: - - name: test-results - description: Workspace for storing test results and logs - mountPath: /workspace/test-results + - name: workspace + description: Combined workspace for artifacts and shared data (Prow-compatible paths) steps: - name: setup-test-environment image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest - env: - - name: HOME - value: /tekton/home script: | #!/bin/bash set -euo pipefail - mkdir -p $(workspaces.test-results.path)/junit - mkdir -p $(workspaces.test-results.path)/logs - mkdir -p $(workspaces.test-results.path)/reports - mkdir -p $(workspaces.test-results.path)/shared - echo "Workspace directories created" + + # Define paths matching Prow structure within single workspace + ARTIFACTS_DIR="$(workspaces.workspace.path)/artifacts" + SHARED_DIR="$(workspaces.workspace.path)/shared" + + mkdir -p ${ARTIFACTS_DIR}/junit + mkdir -p ${ARTIFACTS_DIR}/logs + mkdir -p ${SHARED_DIR} + echo "Workspace directories created (Prow-compatible)" - name: run-osde2e-tests - image: $(params.TEST_IMAGE):$(params.IMAGE_TAG) + image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest env: - - name: OSDE2E_CONFIGS - value: $(params.OSDE2E_CONFIGS) - - name: OCM_CLIENT_ID - value: $(params.OCM_CLIENT_ID) - - name: OCM_CLIENT_SECRET - value: $(params.OCM_CLIENT_SECRET) - - name: AWS_ACCESS_KEY_ID - value: $(params.AWS_ACCESS_KEY_ID) - - name: AWS_SECRET_ACCESS_KEY - value: $(params.AWS_SECRET_ACCESS_KEY) - - name: CLOUD_PROVIDER_REGION - value: $(params.CLOUD_PROVIDER_REGION) - - name: LOG_BUCKET - value: $(params.LOG_BUCKET) - - name: USE_EXISTING_CLUSTER - value: $(params.USE_EXISTING_CLUSTER) - - name: CLUSTER_ID - value: $(params.CLUSTER_ID) + # osde2e output configuration - use workspace subdirectories + - name: ARTIFACTS + value: "$(workspaces.workspace.path)/artifacts" - name: REPORT_DIR - value: $(workspaces.test-results.path)/reports - - name: JUNIT_REPORT_DIR - value: $(workspaces.test-results.path)/junit + value: "$(workspaces.workspace.path)/artifacts" + - name: SHARED_DIR + value: "$(workspaces.workspace.path)/shared" script: | #!/bin/bash set -euo pipefail + ARTIFACTS_DIR="$(workspaces.workspace.path)/artifacts" + # Run tests and capture results - /osde2e test --configs $OSDE2E_CONFIGS 2>&1 | tee $(workspaces.test-results.path)/logs/osde2e-full.log - - # Determine test status - if [ $? -eq 0 ]; then - echo "PASS" > /tmp/test-status.txt - else - echo "FAIL" > /tmp/test-status.txt - fi + /osde2e test --configs $(params.OSDE2E_CONFIGS) 2>&1 | tee ${ARTIFACTS_DIR}/logs/osde2e-full.log ``` @@ -168,7 +135,7 @@ spec: **Purpose:** Uploads test results to S3 for long-term storage and generates pre-signed URLs. 
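
A pre-signed URL can also be generated by hand for any uploaded object (illustrative bucket and key; substitute your own):

```bash
# 604800 seconds = 7 days, the same expiry the task uses
aws s3 presign "s3://osde2e-loki-logs/test-results/2025-12-10/<run>/logs/osde2e-full.log" \
  --expires-in 604800
```
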
**Key Features:** -- Uploads all files from workspace to S3 +- Uploads all files from workspace artifacts to S3 - Organizes by date: `test-results/YYYY-MM-DD//` - Generates 7-day pre-signed URLs for browser access @@ -209,8 +176,8 @@ spec: default: "" workspaces: - - name: test-results - mountPath: /workspace/test-results + - name: workspace + description: Combined workspace containing test artifacts to upload results: - name: s3-path @@ -244,10 +211,11 @@ spec: TIMESTAMP=$(date +%Y%m%d-%H%M%S) S3_PREFIX="test-results/${DATE_PREFIX}/${PIPELINE_RUN}-${TIMESTAMP}" - echo "Uploading to s3://${S3_BUCKET}/${S3_PREFIX}/" + # Upload from workspace artifacts directory + ARTIFACTS_DIR="$(workspaces.workspace.path)/artifacts" - # Upload all files - aws s3 cp /workspace/test-results/ "s3://${S3_BUCKET}/${S3_PREFIX}/" --recursive + echo "Uploading to s3://${S3_BUCKET}/${S3_PREFIX}/" + aws s3 cp ${ARTIFACTS_DIR}/ "s3://${S3_BUCKET}/${S3_PREFIX}/" --recursive # Generate pre-signed URLs (valid 7 days = 604800 seconds) echo "Pre-signed URLs (valid 7 days):" @@ -269,6 +237,7 @@ spec: - Runs main test Task - Automatically uploads results to S3 in `finally` section - Passes test status to S3 upload Task +- Uses single shared workspace **Apply Command:** ```bash @@ -295,39 +264,17 @@ spec: - name: IMAGE_TAG type: string default: "latest" - - name: OCM_CLIENT_ID - type: string - default: "" - - name: OCM_CLIENT_SECRET - type: string - default: "" - - name: AWS_ACCESS_KEY_ID - type: string - default: "" - - name: AWS_SECRET_ACCESS_KEY - type: string - default: "" - - name: CLOUD_PROVIDER_REGION - type: string - default: "us-east-1" - - name: LOG_BUCKET - type: string - default: "osde2e-logs" - - name: USE_EXISTING_CLUSTER - type: string - default: "TRUE" - name: CLUSTER_ID type: string default: "" - - name: CAD_PAGERDUTY_ROUTING_KEY - type: string - default: "" - name: S3_RESULTS_BUCKET type: string default: "osde2e-loki-logs" + # ... other params workspaces: - - name: test-workspace + - name: workspace + description: Combined workspace for artifacts and shared data (Prow-compatible paths) results: - name: final-test-status @@ -344,10 +291,11 @@ spec: value: $(params.TEST_IMAGE) - name: IMAGE_TAG value: $(params.IMAGE_TAG) - # ... other params passed through + - name: CLUSTER_ID + value: $(params.CLUSTER_ID) workspaces: - - name: test-results - workspace: test-workspace + - name: workspace + workspace: workspace finally: - name: upload-results-to-s3 @@ -358,13 +306,11 @@ spec: value: $(params.S3_RESULTS_BUCKET) - name: PIPELINE_RUN_NAME value: $(context.pipelineRun.name) - - name: AWS_REGION - value: $(params.CLOUD_PROVIDER_REGION) - name: TEST_STATUS value: $(tasks.osde2e-test.results.test-status) workspaces: - - name: test-results - workspace: test-workspace + - name: workspace + workspace: workspace ``` @@ -376,7 +322,7 @@ spec: **Purpose:** OpenShift Template for easily creating PipelineRuns. 
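
A minimal instantiation sketch (parameter values here are placeholders):

```bash
oc process -f e2e-tekton-template.yml \
  -p CLUSTER_ID=<cluster-id> \
  -p IMAGE_TAG=latest \
  | oc apply -f -
```
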
**Key Features:** -- Creates PVC for test workspace +- Uses volumeClaimTemplate for dynamic PVC creation (single workspace) - Creates PipelineRun with all parameters - Auto-generates unique JOBID - Sets timeouts (3 hours total) @@ -416,38 +362,12 @@ parameters: displayName: "Cluster ID" required: false value: '' - - name: OCM_CLIENT_ID - required: false - - name: OCM_CLIENT_SECRET - required: false - - name: AWS_ACCESS_KEY_ID - required: false - - name: AWS_SECRET_ACCESS_KEY - required: false - - name: CLOUD_PROVIDER_REGION - value: "us-east-1" - name: JOBID generate: expression from: "[0-9a-z]{7}" objects: - # PVC for workspace - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: osde2e-test-workspace-${JOBID} - labels: - app: osde2e - job-id: ${JOBID} - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi - storageClassName: gp3-csi - - # PipelineRun + # PipelineRun with volumeClaimTemplate (no separate PVC needed) - apiVersion: tekton.dev/v1 kind: PipelineRun metadata: @@ -472,11 +392,17 @@ objects: value: ${IMAGE_TAG} - name: CLUSTER_ID value: ${CLUSTER_ID} - # ... other params workspaces: - - name: test-workspace - persistentVolumeClaim: - claimName: osde2e-test-workspace-${JOBID} + # Single workspace using volumeClaimTemplate + - name: workspace + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp3-csi timeouts: pipeline: "3h0m0s" tasks: "2h45m0s" diff --git a/test/e2e/doc/STORAGE-GUIDE.md b/test/e2e/doc/STORAGE-GUIDE.md index a19a92c..45e8a19 100644 --- a/test/e2e/doc/STORAGE-GUIDE.md +++ b/test/e2e/doc/STORAGE-GUIDE.md @@ -27,7 +27,7 @@ Guide to test result storage: where data is stored, how to access it, and S3 con ▼ ▼ ▼ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ Workspace PVC │ │ Pod Logs │ │ Tekton Results │ -│ (test files) │ │ (stdout) │ │ (metadata) │ +│ (artifacts/) │ │ (stdout) │ │ (metadata) │ └────────┬────────┘ └────────┬────────┘ └────────┬────────┘ │ │ │ ▼ ▼ ▼ @@ -39,6 +39,30 @@ Guide to test result storage: where data is stored, how to access it, and S3 con --- +## Workspace Structure (Prow-Compatible) + +The Tekton pipeline uses a single workspace with subdirectories that match Prow's standard paths: + +``` +workspace/ +├── artifacts/ # Matches Prow ARTIFACTS environment variable +│ ├── junit/ # JUnit XML results +│ │ └── merged-results.xml +│ ├── logs/ # Test execution logs +│ │ ├── setup.log +│ │ ├── osde2e-full.log +│ │ ├── test-execution.log +│ │ ├── summary.log +│ │ └── consolidated.log +│ ├── install-log.txt +│ ├── test_output.log +│ └── uninstall-log.txt +└── shared/ # Matches Prow SHARED_DIR environment variable + └── cluster-id # Data shared between steps +``` + +--- + ## S3 Setup Guide ### Step 1: Create S3 Bucket @@ -196,17 +220,21 @@ Test outputs including logs, JUnit XML, and reports are uploaded to S3. 
``` s3://osde2e-loki-logs/ └── test-results/ - └── 2025-12-03/ - └── osde2e-xxx-20251203-123456/ + └── 2025-12-10/ + └── osde2e-xxx-20251210-123456/ ├── logs/ │ ├── osde2e-full.log │ ├── consolidated.log - │ └── summary.log - ├── reports/ - │ ├── test_output.log - │ └── install-log.txt - └── junit/ - └── merged-results.xml + │ ├── setup.log + │ ├── summary.log + │ └── test-execution.log + ├── junit/ + │ └── merged-results.xml + ├── install/ + │ └── junit_xxx.xml + ├── install-log.txt + ├── test_output.log + └── uninstall-log.txt ``` ### Accessing Test Results @@ -218,7 +246,7 @@ s3://osde2e-loki-logs/ oc logs -upload-results-to-s3-pod -n osde2e-tekton # Output includes URLs like: -# osde2e-full.log: +# 📄 osde2e-full.log: # https://osde2e-loki-logs.s3.us-east-1.amazonaws.com/test-results/...?X-Amz-... ``` @@ -229,10 +257,10 @@ oc logs -upload-results-to-s3-pod -n osde2e-tekton aws s3 ls s3://osde2e-loki-logs/test-results/ --recursive | head -20 # Download results -aws s3 cp s3://osde2e-loki-logs/test-results/2025-12-03/osde2e-xxx/ ./results/ --recursive +aws s3 cp s3://osde2e-loki-logs/test-results/2025-12-10/osde2e-xxx/ ./results/ --recursive # Generate pre-signed URL manually -aws s3 presign s3://osde2e-loki-logs/test-results/2025-12-03/xxx/logs/osde2e-full.log --expires-in 604800 +aws s3 presign s3://osde2e-loki-logs/test-results/2025-12-10/xxx/logs/osde2e-full.log --expires-in 604800 ``` --- diff --git a/test/e2e/e2e-tekton-template.yml b/test/e2e/e2e-tekton-template.yml index 5fd1834..1a2ad8c 100644 --- a/test/e2e/e2e-tekton-template.yml +++ b/test/e2e/e2e-tekton-template.yml @@ -81,26 +81,6 @@ parameters: description: "PagerDuty routing key for alerts" required: false objects: - # PersistentVolumeClaim for test workspace - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: osde2e-test-workspace-${JOBID} - labels: - app: osde2e - component: testing - job-id: ${JOBID} - test-image-tag: ${IMAGE_TAG} - annotations: - description: "Workspace for OSDE2E test execution and result storage" - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi - storageClassName: gp3-csi - # PipelineRun for test execution # Note: Uses the default 'pipeline' ServiceAccount which has anyuid SCC - apiVersion: tekton.dev/v1 @@ -154,9 +134,17 @@ objects: - name: CAD_PAGERDUTY_ROUTING_KEY value: ${CAD_PAGERDUTY_ROUTING_KEY} workspaces: - - name: test-workspace - persistentVolumeClaim: - claimName: osde2e-test-workspace-${JOBID} + # Combined workspace for artifacts and shared data + # Using volumeClaimTemplate for dynamic PVC creation + - name: workspace + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi + storageClassName: gp3-csi # Timeout for the entire pipeline (3 hours like the original Job) timeouts: pipeline: "3h0m0s" diff --git a/test/e2e/osde2e-pipeline.yml b/test/e2e/osde2e-pipeline.yml index d5ca818..e3ccc5d 100644 --- a/test/e2e/osde2e-pipeline.yml +++ b/test/e2e/osde2e-pipeline.yml @@ -70,8 +70,8 @@ spec: default: "osde2e-loki-logs" workspaces: - - name: test-workspace - description: Shared workspace for test execution and result storage + - name: workspace + description: Combined workspace for artifacts and shared data (Prow-compatible paths) results: - name: final-test-status @@ -111,8 +111,8 @@ spec: - name: CAD_PAGERDUTY_ROUTING_KEY value: $(params.CAD_PAGERDUTY_ROUTING_KEY) workspaces: - - name: test-results - workspace: test-workspace + - name: workspace + workspace: workspace finally: # Upload results 
to S3 for long-term storage and URL access @@ -131,5 +131,5 @@ spec: - name: OSDE2E_CONFIGS value: $(params.OSDE2E_CONFIGS) workspaces: - - name: test-results - workspace: test-workspace + - name: workspace + workspace: workspace diff --git a/test/e2e/osde2e-tekton-task.yml b/test/e2e/osde2e-tekton-task.yml index 3a30d0d..bac0d40 100644 --- a/test/e2e/osde2e-tekton-task.yml +++ b/test/e2e/osde2e-tekton-task.yml @@ -74,9 +74,8 @@ spec: description: Test execution summary workspaces: - - name: test-results - description: Workspace for storing test results and logs - mountPath: /workspace/test-results + - name: workspace + description: Combined workspace for artifacts and shared data (Prow-compatible paths) steps: - name: setup-test-environment @@ -90,24 +89,30 @@ spec: echo "Setting up test environment..." - # Create workspace directories (no root needed) - mkdir -p $(workspaces.test-results.path)/junit - mkdir -p $(workspaces.test-results.path)/logs - mkdir -p $(workspaces.test-results.path)/reports - mkdir -p $(workspaces.test-results.path)/shared + # Define paths matching Prow structure within single workspace + ARTIFACTS_DIR="$(workspaces.workspace.path)/artifacts" + SHARED_DIR="$(workspaces.workspace.path)/shared" + + # Create directories matching Prow structure + mkdir -p ${ARTIFACTS_DIR}/junit + mkdir -p ${ARTIFACTS_DIR}/logs + mkdir -p ${SHARED_DIR} - echo "Workspace directories created" + echo "Workspace directories created (Prow-compatible)" # Verify directories exist and are writable - ls -la $(workspaces.test-results.path)/ + echo "=== ARTIFACTS directory ===" + ls -la ${ARTIFACTS_DIR}/ + echo "=== SHARED directory ===" + ls -la ${SHARED_DIR}/ # Log environment information - echo "=== Test Environment Setup ===" | tee $(workspaces.test-results.path)/logs/setup.log - echo "OSDE2E_CONFIGS: $(params.OSDE2E_CONFIGS)" | tee -a $(workspaces.test-results.path)/logs/setup.log - echo "TEST_IMAGE: $(params.TEST_IMAGE):$(params.IMAGE_TAG)" | tee -a $(workspaces.test-results.path)/logs/setup.log - echo "CLOUD_PROVIDER_REGION: $(params.CLOUD_PROVIDER_REGION)" | tee -a $(workspaces.test-results.path)/logs/setup.log - echo "USE_EXISTING_CLUSTER: $(params.USE_EXISTING_CLUSTER)" | tee -a $(workspaces.test-results.path)/logs/setup.log - echo "Setup completed successfully" | tee -a $(workspaces.test-results.path)/logs/setup.log + echo "=== Test Environment Setup ===" | tee ${ARTIFACTS_DIR}/logs/setup.log + echo "OSDE2E_CONFIGS: $(params.OSDE2E_CONFIGS)" | tee -a ${ARTIFACTS_DIR}/logs/setup.log + echo "TEST_IMAGE: $(params.TEST_IMAGE):$(params.IMAGE_TAG)" | tee -a ${ARTIFACTS_DIR}/logs/setup.log + echo "CLOUD_PROVIDER_REGION: $(params.CLOUD_PROVIDER_REGION)" | tee -a ${ARTIFACTS_DIR}/logs/setup.log + echo "USE_EXISTING_CLUSTER: $(params.USE_EXISTING_CLUSTER)" | tee -a ${ARTIFACTS_DIR}/logs/setup.log + echo "Setup completed successfully" | tee -a ${ARTIFACTS_DIR}/logs/setup.log securityContext: runAsNonRoot: true @@ -125,9 +130,12 @@ spec: echo "Starting osde2e test execution..." 
+ # Define paths matching Prow structure + ARTIFACTS_DIR="$(workspaces.workspace.path)/artifacts" + # Set test start time TEST_START_TIME=$(date -Iseconds) - echo "Test started at: $TEST_START_TIME" | tee $(workspaces.test-results.path)/logs/test-execution.log + echo "Test started at: $TEST_START_TIME" | tee ${ARTIFACTS_DIR}/logs/test-execution.log # Run osde2e tests with enhanced logging and result collection TEST_EXIT_CODE=0 @@ -136,32 +144,32 @@ spec: --skip-destroy-cluster \ --skip-must-gather \ --configs $(params.OSDE2E_CONFIGS) \ - 2>&1 | tee $(workspaces.test-results.path)/logs/osde2e-full.log || TEST_EXIT_CODE=$? + 2>&1 | tee ${ARTIFACTS_DIR}/logs/osde2e-full.log || TEST_EXIT_CODE=$? # Set test end time TEST_END_TIME=$(date -Iseconds) - echo "Test completed at: $TEST_END_TIME" | tee -a $(workspaces.test-results.path)/logs/test-execution.log - echo "Test exit code: $TEST_EXIT_CODE" | tee -a $(workspaces.test-results.path)/logs/test-execution.log + echo "Test completed at: $TEST_END_TIME" | tee -a ${ARTIFACTS_DIR}/logs/test-execution.log + echo "Test exit code: $TEST_EXIT_CODE" | tee -a ${ARTIFACTS_DIR}/logs/test-execution.log # Determine test status if [ $TEST_EXIT_CODE -eq 0 ]; then echo "PASS" > $(results.test-status.path) - echo "✅ All tests passed" | tee -a $(workspaces.test-results.path)/logs/test-execution.log + echo "✅ All tests passed" | tee -a ${ARTIFACTS_DIR}/logs/test-execution.log else echo "FAIL" > $(results.test-status.path) - echo "❌ Tests failed with exit code: $TEST_EXIT_CODE" | tee -a $(workspaces.test-results.path)/logs/test-execution.log + echo "❌ Tests failed with exit code: $TEST_EXIT_CODE" | tee -a ${ARTIFACTS_DIR}/logs/test-execution.log fi # Create test summary - echo "=== Test Execution Summary ===" > $(workspaces.test-results.path)/logs/summary.log - echo "Start Time: $TEST_START_TIME" >> $(workspaces.test-results.path)/logs/summary.log - echo "End Time: $TEST_END_TIME" >> $(workspaces.test-results.path)/logs/summary.log - echo "Exit Code: $TEST_EXIT_CODE" >> $(workspaces.test-results.path)/logs/summary.log - echo "Status: $(cat $(results.test-status.path))" >> $(workspaces.test-results.path)/logs/summary.log - echo "Config: $(params.OSDE2E_CONFIGS)" >> $(workspaces.test-results.path)/logs/summary.log + echo "=== Test Execution Summary ===" > ${ARTIFACTS_DIR}/logs/summary.log + echo "Start Time: $TEST_START_TIME" >> ${ARTIFACTS_DIR}/logs/summary.log + echo "End Time: $TEST_END_TIME" >> ${ARTIFACTS_DIR}/logs/summary.log + echo "Exit Code: $TEST_EXIT_CODE" >> ${ARTIFACTS_DIR}/logs/summary.log + echo "Status: $(cat $(results.test-status.path))" >> ${ARTIFACTS_DIR}/logs/summary.log + echo "Config: $(params.OSDE2E_CONFIGS)" >> ${ARTIFACTS_DIR}/logs/summary.log # Copy summary to result - cp $(workspaces.test-results.path)/logs/summary.log $(results.test-summary.path) + cp ${ARTIFACTS_DIR}/logs/summary.log $(results.test-summary.path) # Always exit 0 here to allow collect-test-results step to run # The actual test status is stored in test-status result @@ -185,11 +193,13 @@ spec: value: "$(params.CLUSTER_ID)" - name: CAD_PAGERDUTY_ROUTING_KEY value: "$(params.CAD_PAGERDUTY_ROUTING_KEY)" - # osde2e output configuration + # osde2e output configuration - use workspace subdirectories + - name: ARTIFACTS + value: "$(workspaces.workspace.path)/artifacts" - name: REPORT_DIR - value: "$(workspaces.test-results.path)/reports" + value: "$(workspaces.workspace.path)/artifacts" - name: SHARED_DIR - value: "$(workspaces.test-results.path)/shared" + value: 
"$(workspaces.workspace.path)/shared" # Credentials from Secret and Parameters (Secret takes precedence) - name: OCM_CLIENT_ID valueFrom: @@ -244,38 +254,39 @@ spec: echo "Collecting and processing test results..." - # Process JUnit XML results from osde2e REPORT_DIR - # NOTE: We store summary only in Results (to avoid 4KB limit) - # Full JUnit XML is saved to workspace for later retrieval + # Define paths matching Prow structure + ARTIFACTS_DIR="$(workspaces.workspace.path)/artifacts" + + # Process JUnit XML results from ARTIFACTS directory JUNIT_FOUND=false - # Check for JUnit XML in reports directory (osde2e default location) - if [ -d "$(workspaces.test-results.path)/reports" ]; then - echo "Searching for JUnit XML files in reports directory..." - find $(workspaces.test-results.path)/reports -name "*.xml" -type f > /tmp/junit_files.txt 2>/dev/null || true + # Check for JUnit XML in ARTIFACTS directory + if [ -d "${ARTIFACTS_DIR}" ]; then + echo "Searching for JUnit XML files in ARTIFACTS directory..." + find ${ARTIFACTS_DIR} -name "*.xml" -type f > /tmp/junit_files.txt 2>/dev/null || true if [ -s /tmp/junit_files.txt ]; then echo "Found JUnit XML files" JUNIT_COUNT=$(cat /tmp/junit_files.txt | wc -l) # Merge full results to workspace (not to Result) - cat $(cat /tmp/junit_files.txt) > $(workspaces.test-results.path)/junit/merged-results.xml 2>/dev/null && JUNIT_FOUND=true + cat $(cat /tmp/junit_files.txt) > ${ARTIFACTS_DIR}/junit/merged-results.xml 2>/dev/null && JUNIT_FOUND=true # Store only summary in Result (to avoid size limit) echo "JUnit: Found $JUNIT_COUNT XML file(s)" > $(results.test-results.path) # Log JUnit summary - echo "=== JUnit Results Summary ===" >> $(workspaces.test-results.path)/logs/summary.log - echo "JUnit XML files found: $JUNIT_COUNT" >> $(workspaces.test-results.path)/logs/summary.log - cat /tmp/junit_files.txt >> $(workspaces.test-results.path)/logs/summary.log + echo "=== JUnit Results Summary ===" >> ${ARTIFACTS_DIR}/logs/summary.log + echo "JUnit XML files found: $JUNIT_COUNT" >> ${ARTIFACTS_DIR}/logs/summary.log + cat /tmp/junit_files.txt >> ${ARTIFACTS_DIR}/logs/summary.log fi fi - # Also check for junit.xml in the main reports directory (common osde2e pattern) - if [ -f "$(workspaces.test-results.path)/reports/junit.xml" ]; then - echo "Found junit.xml in reports directory" - cp $(workspaces.test-results.path)/reports/junit.xml $(workspaces.test-results.path)/junit/junit.xml - echo "JUnit: reports/junit.xml" > $(results.test-results.path) + # Also check for junit.xml in the main ARTIFACTS directory (common osde2e pattern) + if [ -f "${ARTIFACTS_DIR}/junit.xml" ]; then + echo "Found junit.xml in ARTIFACTS directory" + cp ${ARTIFACTS_DIR}/junit.xml ${ARTIFACTS_DIR}/junit/junit.xml + echo "JUnit: artifacts/junit.xml" > $(results.test-results.path) JUNIT_FOUND=true fi @@ -283,57 +294,55 @@ spec: if [ "$JUNIT_FOUND" = "false" ]; then echo "No JUnit XML results found" echo "JUnit: No results" > $(results.test-results.path) - echo "No JUnit XML files found" >> $(workspaces.test-results.path)/logs/summary.log + echo "No JUnit XML files found" >> ${ARTIFACTS_DIR}/logs/summary.log fi - # Consolidate all logs to workspace (not to Result to avoid size limit) + # Consolidate all logs to workspace echo "Consolidating test logs..." 
{ echo "=== OSDE2E Test Execution Logs ===" echo "Generated at: $(date -Iseconds)" echo "" - if [ -f "$(workspaces.test-results.path)/logs/setup.log" ]; then + if [ -f "${ARTIFACTS_DIR}/logs/setup.log" ]; then echo "=== Setup Logs ===" - cat $(workspaces.test-results.path)/logs/setup.log + cat ${ARTIFACTS_DIR}/logs/setup.log echo "" fi - if [ -f "$(workspaces.test-results.path)/logs/test-execution.log" ]; then + if [ -f "${ARTIFACTS_DIR}/logs/test-execution.log" ]; then echo "=== Test Execution Summary ===" - cat $(workspaces.test-results.path)/logs/test-execution.log + cat ${ARTIFACTS_DIR}/logs/test-execution.log echo "" fi - if [ -f "$(workspaces.test-results.path)/logs/osde2e-full.log" ]; then + if [ -f "${ARTIFACTS_DIR}/logs/osde2e-full.log" ]; then echo "=== Full OSDE2E Output ===" - cat $(workspaces.test-results.path)/logs/osde2e-full.log + cat ${ARTIFACTS_DIR}/logs/osde2e-full.log fi - # Include osde2e generated logs from REPORT_DIR - if [ -d "$(workspaces.test-results.path)/reports" ]; then - echo "" - echo "=== OSDE2E Generated Files ===" - find $(workspaces.test-results.path)/reports -type f -name "*.log" 2>/dev/null | while read logfile; do - if [ -f "$logfile" ]; then - echo "" - echo "=== $(basename $logfile) ===" - cat "$logfile" 2>/dev/null || echo "Could not read $logfile" - fi - done - - # Include test_output.log if it exists (osde2e default) - if [ -f "$(workspaces.test-results.path)/reports/test_output.log" ]; then + # Include osde2e generated logs from ARTIFACTS directory + echo "" + echo "=== OSDE2E Generated Files ===" + find ${ARTIFACTS_DIR} -type f -name "*.log" 2>/dev/null | while read logfile; do + if [ -f "$logfile" ]; then echo "" - echo "=== OSDE2E Test Output Log ===" - cat $(workspaces.test-results.path)/reports/test_output.log + echo "=== $(basename $logfile) ===" + cat "$logfile" 2>/dev/null || echo "Could not read $logfile" fi + done + + # Include test_output.log if it exists (osde2e default) + if [ -f "${ARTIFACTS_DIR}/test_output.log" ]; then + echo "" + echo "=== OSDE2E Test Output Log ===" + cat ${ARTIFACTS_DIR}/test_output.log fi - } > $(workspaces.test-results.path)/logs/consolidated.log + } > ${ARTIFACTS_DIR}/logs/consolidated.log # Store only summary in Result (to avoid 4KB limit) echo "Logs consolidated at: $(date -Iseconds)" > $(results.test-logs.path) - echo "Full logs available in workspace: logs/consolidated.log" >> $(results.test-logs.path) + echo "Full logs available in workspace: artifacts/logs/consolidated.log" >> $(results.test-logs.path) echo "Test result collection completed successfully" diff --git a/test/e2e/upload-to-s3-task.yml b/test/e2e/upload-to-s3-task.yml index 6c720c5..50ece62 100644 --- a/test/e2e/upload-to-s3-task.yml +++ b/test/e2e/upload-to-s3-task.yml @@ -39,9 +39,8 @@ spec: default: "" workspaces: - - name: test-results - description: Workspace containing test results to upload - mountPath: /workspace/test-results + - name: workspace + description: Combined workspace containing test artifacts to upload results: - name: s3-path @@ -84,14 +83,44 @@ spec: echo "Test Status: $(params.TEST_STATUS)" echo "" - # Upload all files + # Upload all files from workspace artifacts directory with correct content-types + # This ensures files open in browser instead of downloading + ARTIFACTS_DIR="$(workspaces.workspace.path)/artifacts" UPLOAD_STATUS="SUCCESS" - if aws s3 cp /workspace/test-results/ "s3://${S3_BUCKET}/${S3_PREFIX}/" --recursive 2>&1; then - echo "✅ Upload completed" - else - echo "⚠️ Upload had issues" - 
UPLOAD_STATUS="PARTIAL" - fi + + cd ${ARTIFACTS_DIR} + + # Upload .log and .txt files as text/plain (opens in browser) + echo "Uploading log files..." + find . -type f \( -name "*.log" -o -name "*.txt" \) -exec \ + aws s3 cp {} "s3://${S3_BUCKET}/${S3_PREFIX}/{}" \ + --content-type "text/plain; charset=utf-8" \; 2>&1 || true + + # Upload .xml files as application/xml (opens in browser) + echo "Uploading XML files..." + find . -type f -name "*.xml" -exec \ + aws s3 cp {} "s3://${S3_BUCKET}/${S3_PREFIX}/{}" \ + --content-type "application/xml" \; 2>&1 || true + + # Upload .json files as application/json (opens in browser) + echo "Uploading JSON files..." + find . -type f -name "*.json" -exec \ + aws s3 cp {} "s3://${S3_BUCKET}/${S3_PREFIX}/{}" \ + --content-type "application/json" \; 2>&1 || true + + # Upload .html files as text/html (renders in browser) + echo "Uploading HTML files..." + find . -type f -name "*.html" -exec \ + aws s3 cp {} "s3://${S3_BUCKET}/${S3_PREFIX}/{}" \ + --content-type "text/html" \; 2>&1 || true + + # Upload remaining files with default content-type + echo "Uploading other files..." + find . -type f ! -name "*.log" ! -name "*.txt" ! -name "*.xml" ! -name "*.json" ! -name "*.html" -exec \ + aws s3 cp {} "s3://${S3_BUCKET}/${S3_PREFIX}/{}" \; 2>&1 || true + + cd - > /dev/null + echo "✅ Upload completed" echo "" echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━" diff --git a/test/e2e/view-pipeline-logs.sh b/test/e2e/view-pipeline-logs.sh index 47e041e..692ba5a 100755 --- a/test/e2e/view-pipeline-logs.sh +++ b/test/e2e/view-pipeline-logs.sh @@ -167,24 +167,25 @@ read_logs_from_pods() { read_logs_from_workspace() { echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" - echo -e "${CYAN}Reading logs from Workspace PVC${NC}" + echo -e "${CYAN}Reading logs from Workspace PVC (Prow-compatible paths)${NC}" echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" # Extract PipelineRun short ID RUN_ID=$(echo "$PIPELINERUN_NAME" | rev | cut -d'-' -f1 | rev) - PVC_NAME="osde2e-test-workspace-$RUN_ID" - echo "Looking for PVC: $PVC_NAME" + # Find PVC - could be volumeClaimTemplate generated or pre-created + # volumeClaimTemplate creates PVC with format: pvc- + PVC_NAME=$(oc get pvc -n "$NAMESPACE" -l tekton.dev/pipelineRun="$PIPELINERUN_NAME" -o jsonpath='{.items[0].metadata.name}' 2>/dev/null) - if ! oc get pvc "$PVC_NAME" -n "$NAMESPACE" &>/dev/null; then - echo -e "${RED}Error: PVC '$PVC_NAME' does not exist${NC}" + if [ -z "$PVC_NAME" ]; then + echo -e "${RED}Error: No PVC found for PipelineRun '$PIPELINERUN_NAME'${NC}" return 1 fi - echo -e "${GREEN}Found PVC: $PVC_NAME${NC}" + echo "Found PVC: $PVC_NAME" echo "Creating temporary debug Pod..." 
-    # Create temporary Pod
+    # Create temporary Pod - mount workspace and access artifacts subdirectory
     DEBUG_POD="debug-viewer-$(date +%s)"

     cat <<EOF | oc apply -f - &>/dev/null
 apiVersion: v1
@@ -218,18 +219,18 @@ EOF
     echo -e "${GREEN}Debug Pod ready${NC}"
     echo ""

-    # List available log files
+    # List available log files (Prow-compatible path: /workspace/artifacts/logs)
     echo -e "${CYAN}Available log files:${NC}"
-    oc exec "$DEBUG_POD" -n "$NAMESPACE" -- find /workspace/test-results/logs -type f 2>/dev/null || true
+    oc exec "$DEBUG_POD" -n "$NAMESPACE" -- find /workspace/artifacts/logs -type f 2>/dev/null || true
     echo ""

-    # Read consolidated log
-    if oc exec "$DEBUG_POD" -n "$NAMESPACE" -- test -f /workspace/test-results/logs/consolidated.log 2>/dev/null; then
+    # Read consolidated log from Prow-compatible path
+    if oc exec "$DEBUG_POD" -n "$NAMESPACE" -- test -f /workspace/artifacts/logs/consolidated.log 2>/dev/null; then
       echo -e "${CYAN}=== Consolidated Log (consolidated.log) ===${NC}"
-      oc exec "$DEBUG_POD" -n "$NAMESPACE" -- cat /workspace/test-results/logs/consolidated.log
+      oc exec "$DEBUG_POD" -n "$NAMESPACE" -- cat /workspace/artifacts/logs/consolidated.log
     else
       echo -e "${YELLOW}Warning: consolidated.log not found, showing all available logs:${NC}"
-      oc exec "$DEBUG_POD" -n "$NAMESPACE" -- sh -c 'for log in /workspace/test-results/logs/*.log; do echo ""; echo "=== $(basename $log) ==="; cat "$log"; done'
+      oc exec "$DEBUG_POD" -n "$NAMESPACE" -- sh -c 'for log in /workspace/artifacts/logs/*.log; do echo ""; echo "=== $(basename $log) ==="; cat "$log"; done'
     fi

     # Cleanup
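For reference, the debug Pod manifest itself falls between the two hunks above and is not part of the diff. A minimal equivalent heredoc (the image, sleep duration, and exact `oc apply` piping are assumptions, not the script's actual manifest) might look like:

```bash
# Hypothetical reconstruction of the elided heredoc: a throwaway Pod that
# mounts the PipelineRun's PVC and sleeps so `oc exec` can read its files.
cat <<EOF | oc apply -n "$NAMESPACE" -f - >/dev/null
apiVersion: v1
kind: Pod
metadata:
  name: ${DEBUG_POD}
spec:
  restartPolicy: Never
  containers:
    - name: viewer
      image: registry.access.redhat.com/ubi9/ubi-minimal  # assumed image
      command: ["sleep", "3600"]                          # assumed duration
      volumeMounts:
        - name: workspace
          mountPath: /workspace
  volumes:
    - name: workspace
      persistentVolumeClaim:
        claimName: ${PVC_NAME}
EOF
```

Once the Pod reports Ready, the script's `oc exec` calls read the logs from /workspace/artifacts/logs as shown above.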