diff --git a/charts/acs-central/templates/jobs/create-auth-provider.yaml b/charts/acs-central/templates/jobs/create-auth-provider.yaml index 46af4787..ae5dc4fb 100644 --- a/charts/acs-central/templates/jobs/create-auth-provider.yaml +++ b/charts/acs-central/templates/jobs/create-auth-provider.yaml @@ -45,51 +45,50 @@ spec: - | #!/usr/bin/env bash - echo "🔄 Configuring Keycloak OIDC authentication provider..." + echo "Configuring Keycloak OIDC authentication provider..." + + wait_for_central() { + local max_retries=30 + local retry_count=0 + echo "Waiting for ACS Central API to be available..." + until curl -sk -u "admin:$PASSWORD" https://central/v1/ping > /dev/null 2>&1; do + retry_count=$((retry_count + 1)) + if [ $retry_count -ge $max_retries ]; then + echo "ERROR: Timeout waiting for ACS Central API" + return 1 + fi + echo " Retry $retry_count/$max_retries..." + sleep 10 + done + echo "ACS Central API is ready" + } - # Wait for ACS Central to be ready - echo "⏳ Waiting for ACS Central API to be available..." - max_retries=30 - retry_count=0 - until curl -sk -u "admin:$PASSWORD" https://central/v1/ping > /dev/null 2>&1; do - retry_count=$((retry_count + 1)) - if [ $retry_count -ge $max_retries ]; then - echo "❌ Timeout waiting for ACS Central API" - exit 1 - fi - echo " Retry $retry_count/$max_retries..." - sleep 10 - done - echo "✅ ACS Central API is ready" + wait_for_central || exit 1 - # Wait for Keycloak OIDC discovery endpoint to be available - echo "⏳ Waiting for Keycloak OIDC discovery endpoint..." + echo "Waiting for Keycloak OIDC discovery endpoint..." 
max_retries=30 retry_count=0 until curl -sk "$KEYCLOAK_ISSUER/.well-known/openid-configuration" > /dev/null 2>&1; do retry_count=$((retry_count + 1)) if [ $retry_count -ge $max_retries ]; then - echo "❌ Timeout waiting for Keycloak OIDC discovery endpoint" - echo " Tried: $KEYCLOAK_ISSUER/.well-known/openid-configuration" + echo "ERROR: Timeout waiting for Keycloak OIDC discovery endpoint" + echo " Tried: $KEYCLOAK_ISSUER/.well-known/openid-configuration" exit 1 fi echo " Retry $retry_count/$max_retries..." sleep 10 done - echo "✅ Keycloak OIDC discovery endpoint is ready" + echo "Keycloak OIDC discovery endpoint is ready" - # Check if auth provider already exists AUTH_PROVIDERS=$(curl -sk -u "admin:$PASSWORD" https://central/v1/authProviders) if echo "$AUTH_PROVIDERS" | grep -q "OIDC"; then - echo "✅ OIDC provider already configured" + echo "OIDC provider already configured -- nothing to do" exit 0 fi - # Get ACS Central hostname (without https://) ACS_CENTRAL_HOSTNAME="$(oc get route central -n stackrox -o jsonpath='{.spec.host}')" echo "ACS Central hostname: $ACS_CENTRAL_HOSTNAME" - # Create OIDC provider JSON cat > /tmp/oidc-config.json << 'OIDCEOF' { "name": "OIDC", @@ -111,69 +110,92 @@ spec: } OIDCEOF - # Remove leading spaces from JSON sed -i 's/^ //g' /tmp/oidc-config.json - # Replace placeholders using printf and sed to safely handle special characters - # Escape special sed characters in variables: & \ / and newlines - ACS_CENTRAL_HOSTNAME_ESC=$(printf '%s\n' "$ACS_CENTRAL_HOSTNAME" | sed 's:[&\\/]:\\&:g') - KEYCLOAK_ISSUER_ESC=$(printf '%s\n' "$KEYCLOAK_ISSUER" | sed 's:[&\\/]:\\&:g') - KEYCLOAK_CLIENT_ID_ESC=$(printf '%s\n' "$KEYCLOAK_CLIENT_ID" | sed 's:[&\\/]:\\&:g') - KEYCLOAK_CLIENT_SECRET_ESC=$(printf '%s\n' "$KEYCLOAK_CLIENT_SECRET" | sed 's:[&\\/]:\\&:g') - CLAIM_NAME_ESC=$(printf '%s\n' "$CLAIM_NAME" | sed 's:[&\\/]:\\&:g') - CLAIM_EMAIL_ESC=$(printf '%s\n' "$CLAIM_EMAIL" | sed 's:[&\\/]:\\&:g') - CLAIM_GROUPS_ESC=$(printf '%s\n' 
"$CLAIM_GROUPS" | sed 's:[&\\/]:\\&:g') - CLAIM_ROLES_ESC=$(printf '%s\n' "$CLAIM_ROLES" | sed 's:[&\\/]:\\&:g') - - sed -i "s|UI_ENDPOINT_PLACEHOLDER|$ACS_CENTRAL_HOSTNAME_ESC|g" /tmp/oidc-config.json - sed -i "s|ISSUER_PLACEHOLDER|$KEYCLOAK_ISSUER_ESC|g" /tmp/oidc-config.json - sed -i "s|CLIENT_ID_PLACEHOLDER|$KEYCLOAK_CLIENT_ID_ESC|g" /tmp/oidc-config.json - sed -i "s|CLIENT_SECRET_PLACEHOLDER|$KEYCLOAK_CLIENT_SECRET_ESC|g" /tmp/oidc-config.json - sed -i "s|CLAIM_NAME_PLACEHOLDER|$CLAIM_NAME_ESC|g" /tmp/oidc-config.json - sed -i "s|CLAIM_EMAIL_PLACEHOLDER|$CLAIM_EMAIL_ESC|g" /tmp/oidc-config.json - sed -i "s|CLAIM_GROUPS_PLACEHOLDER|$CLAIM_GROUPS_ESC|g" /tmp/oidc-config.json - sed -i "s|CLAIM_ROLES_PLACEHOLDER|$CLAIM_ROLES_ESC|g" /tmp/oidc-config.json - - # Debug: Show the JSON payload - echo "📝 OIDC Configuration JSON:" + escape_sed() { printf '%s\n' "$1" | sed 's:[&\\/|]:\\&:g'; } + + sed -i "s|UI_ENDPOINT_PLACEHOLDER|$(escape_sed "$ACS_CENTRAL_HOSTNAME")|g" /tmp/oidc-config.json + sed -i "s|ISSUER_PLACEHOLDER|$(escape_sed "$KEYCLOAK_ISSUER")|g" /tmp/oidc-config.json + sed -i "s|CLIENT_ID_PLACEHOLDER|$(escape_sed "$KEYCLOAK_CLIENT_ID")|g" /tmp/oidc-config.json + sed -i "s|CLIENT_SECRET_PLACEHOLDER|$(escape_sed "$KEYCLOAK_CLIENT_SECRET")|g" /tmp/oidc-config.json + sed -i "s|CLAIM_NAME_PLACEHOLDER|$(escape_sed "$CLAIM_NAME")|g" /tmp/oidc-config.json + sed -i "s|CLAIM_EMAIL_PLACEHOLDER|$(escape_sed "$CLAIM_EMAIL")|g" /tmp/oidc-config.json + sed -i "s|CLAIM_GROUPS_PLACEHOLDER|$(escape_sed "$CLAIM_GROUPS")|g" /tmp/oidc-config.json + sed -i "s|CLAIM_ROLES_PLACEHOLDER|$(escape_sed "$CLAIM_ROLES")|g" /tmp/oidc-config.json + + echo "OIDC Configuration JSON:" + cat /tmp/oidc-config.json + echo "" - # Verify Keycloak discovery endpoint one more time before creating provider - echo "🔍 Verifying Keycloak discovery endpoint..." 
- curl -sk "$KEYCLOAK_ISSUER/.well-known/openid-configuration" | head -20 - echo "" + # --- Create OIDC auth provider with TLS retry logic --- + # On fresh clusters the proxy trustedCA injection may not have + # propagated to Central's mounted CA bundle yet. If Central + # rejects the Keycloak issuer with "certificate signed by unknown + # authority", restart the Central deployment so it reloads the + # (now-correct) CA bundle, then retry. + create_oidc_provider() { + local http_code + http_code=$(curl -X POST -u "admin:$PASSWORD" -k https://central/v1/authProviders \ + -H "Content-Type: application/json" \ + --data @/tmp/oidc-config.json \ + -w "%{http_code}" \ + -o /tmp/output.json) + + if [ "$http_code" = "200" ]; then + return 0 + fi - echo "📤 Creating auth provider in ACS..." - HTTP_CODE=$(curl -X POST -u "admin:$PASSWORD" -k https://central/v1/authProviders \ - -H "Content-Type: application/json" \ - --data @/tmp/oidc-config.json \ - -w "%{http_code}" \ - -o /tmp/output.json) + local body + body=$(cat /tmp/output.json) + echo "Auth provider creation returned HTTP $http_code: $body" - echo "📥 Response HTTP Code: $HTTP_CODE" - echo "📥 Response Body:" - cat /tmp/output.json - echo "" + if echo "$body" | grep -q "certificate signed by unknown authority"; then + return 2 + fi + return 1 + } + + MAX_TLS_RETRIES=3 + tls_attempt=0 + + echo "Creating auth provider in ACS..." + while true; do + create_oidc_provider + rc=$? - if [ "$HTTP_CODE" != "200" ]; then - echo "❌ Failed to create auth provider (HTTP $HTTP_CODE)" + if [ $rc -eq 0 ]; then + break + fi + + if [ $rc -eq 2 ]; then + tls_attempt=$((tls_attempt + 1)) + if [ $tls_attempt -gt $MAX_TLS_RETRIES ]; then + echo "ERROR: Central still does not trust the ingress CA after $MAX_TLS_RETRIES restart(s)" + exit 1 + fi + echo "Central does not trust the ingress CA yet (attempt $tls_attempt/$MAX_TLS_RETRIES)" + echo "Restarting Central to reload CA bundle..." 
+ oc rollout restart deployment/central -n stackrox + oc rollout status deployment/central -n stackrox --timeout=180s + wait_for_central || exit 1 + continue + fi + + echo "ERROR: Failed to create auth provider" exit 1 - fi + done - # Extract provider ID AUTH_PROVIDER_ID=$(sed 's/,/\n/g' /tmp/output.json | grep -w id | awk -F\" '{ print $4 }') if [ -z "$AUTH_PROVIDER_ID" ]; then - echo "❌ Failed to extract auth provider ID" + echo "ERROR: Failed to extract auth provider ID" cat /tmp/output.json exit 1 fi - echo "✅ Auth provider created with ID: $AUTH_PROVIDER_ID" + echo "Auth provider created with ID: $AUTH_PROVIDER_ID" - # Create admin role mapping for acs-admin role - echo "📝 Creating role mapping: acs-admin → Admin" + echo "Creating role mapping: acs-admin -> Admin" JSON_PAYLOAD="{\"roleName\":\"Admin\",\"props\":{\"authProviderId\":\"$AUTH_PROVIDER_ID\",\"key\":\"roles\",\"value\":\"acs-admin\"}}" HTTP_CODE=$(curl -X POST -u "admin:$PASSWORD" -k https://central/v1/groups \ @@ -182,17 +204,17 @@ spec: -w "%{http_code}" \ -o /tmp/role-mapping.json) - echo "📥 Role mapping response (HTTP $HTTP_CODE):" + echo "Role mapping response (HTTP $HTTP_CODE):" cat /tmp/role-mapping.json echo "" if [ "$HTTP_CODE" = "200" ] || [ "$HTTP_CODE" = "201" ]; then - echo "✅ Admin role mapping created for acs-admin" + echo "Admin role mapping created for acs-admin" else - echo "⚠️ Warning: Failed to create admin role mapping (HTTP $HTTP_CODE, may already exist)" + echo "WARNING: Failed to create admin role mapping (HTTP $HTTP_CODE, may already exist)" fi - echo "🎉 Keycloak OIDC configuration complete" + echo "Keycloak OIDC configuration complete" name: create-auth-provider dnsPolicy: ClusterFirst restartPolicy: Never diff --git a/charts/acs-central/templates/rbac/cluster-init-role.yaml b/charts/acs-central/templates/rbac/cluster-init-role.yaml index 5355d0de..c260f86c 100644 --- a/charts/acs-central/templates/rbac/cluster-init-role.yaml +++ 
b/charts/acs-central/templates/rbac/cluster-init-role.yaml @@ -33,4 +33,13 @@ rules: - routes verbs: - get - - list \ No newline at end of file + - list + - apiGroups: + - apps + resources: + - deployments + verbs: + - get + - list + - watch + - patch \ No newline at end of file diff --git a/charts/qtodo/templates/_helpers.tpl b/charts/qtodo/templates/_helpers.tpl index 3666c0a0..866e5d3e 100644 --- a/charts/qtodo/templates/_helpers.tpl +++ b/charts/qtodo/templates/_helpers.tpl @@ -1,10 +1,18 @@ {{/* -Create the image path for the passed in image field +Create the image path for the passed in image field. +When global.registry is enabled with domain and repository, the image +reference is derived from global.registry.domain/repository (e.g. +quay.io/ztvp/qtodo) so no VP --set override is needed. */}} {{- define "qtodo.image" -}} +{{- $name := tpl .value.name .context -}} +{{- $useRegistry := default false .useRegistry -}} +{{- if and $useRegistry .context.Values.global.registry.enabled .context.Values.global.registry.domain .context.Values.global.registry.repository -}} +{{- $name = printf "%s/%s" (tpl .context.Values.global.registry.domain .context) .context.Values.global.registry.repository -}} +{{- end -}} {{- if eq (substr 0 7 (tpl .value.version .context)) "sha256:" -}} -{{- printf "%s@%s" (tpl .value.name .context) (tpl .value.version .context) -}} +{{- printf "%s@%s" $name (tpl .value.version .context) -}} {{- else -}} -{{- printf "%s:%s" (tpl .value.name .context) (tpl .value.version .context) -}} +{{- printf "%s:%s" $name (tpl .value.version .context) -}} {{- end -}} {{- end -}} diff --git a/charts/qtodo/templates/app-deployment.yaml b/charts/qtodo/templates/app-deployment.yaml index 41dc0355..8586221e 100644 --- a/charts/qtodo/templates/app-deployment.yaml +++ b/charts/qtodo/templates/app-deployment.yaml @@ -193,7 +193,7 @@ spec: readOnly: true {{- end }} - name: qtodo - image: {{ template "qtodo.image" (dict "value" .Values.app.images.main "context" $) 
}} + image: {{ template "qtodo.image" (dict "value" .Values.app.images.main "context" $ "useRegistry" true) }} imagePullPolicy: {{ .Values.app.images.main.pullPolicy }} ports: - containerPort: 8080 diff --git a/charts/qtodo/templates/app-serviceaccount.yaml b/charts/qtodo/templates/app-serviceaccount.yaml index 984df984..cac8253d 100644 --- a/charts/qtodo/templates/app-serviceaccount.yaml +++ b/charts/qtodo/templates/app-serviceaccount.yaml @@ -5,7 +5,7 @@ metadata: app: qtodo name: qtodo namespace: qtodo -{{- if .Values.app.images.main.registry.auth }} +{{- if or .Values.app.images.main.registry.auth (and .Values.global.registry.enabled .Values.global.registry.vaultPath) }} imagePullSecrets: - name: {{ .Values.app.images.main.registry.secretName }} {{- end }} \ No newline at end of file diff --git a/charts/qtodo/templates/registry-external-secret.yaml b/charts/qtodo/templates/registry-external-secret.yaml index 8646909d..5ce857c0 100644 --- a/charts/qtodo/templates/registry-external-secret.yaml +++ b/charts/qtodo/templates/registry-external-secret.yaml @@ -1,10 +1,17 @@ -{{- if .Values.app.images.main.registry.auth }} +{{- $regAuth := or .Values.app.images.main.registry.auth (and .Values.global.registry.enabled .Values.global.registry.vaultPath) }} +{{- $regDomain := .Values.app.images.main.registry.domain | default .Values.global.registry.domain }} +{{- $regUser := .Values.app.images.main.registry.user | default .Values.global.registry.user }} +{{- $regVaultPath := .Values.app.images.main.registry.vaultPath | default .Values.global.registry.vaultPath }} +{{- $regPasswordKey := .Values.app.images.main.registry.passwordVaultKey | default .Values.global.registry.passwordVaultKey }} +{{- if $regAuth }} --- apiVersion: "external-secrets.io/v1beta1" kind: ExternalSecret metadata: name: {{ .Values.app.images.main.registry.secretName }} namespace: {{ .Release.Namespace | default .Values.global.namespace }} + annotations: + argocd.argoproj.io/sync-wave: "36" spec: 
refreshInterval: 15s secretStoreRef: @@ -18,14 +25,14 @@ spec: .dockerconfigjson: | { "auths": { - "{{ .Values.app.images.main.registry.domain | default (printf "quay-registry-quay-quay-enterprise.%s" .Values.global.hubClusterDomain) }}": { - "auth": "{{ `{{ printf "%s:%s" "` }}{{ .Values.app.images.main.registry.user }}{{ `" .password | b64enc }}` }}" + "{{ tpl (required "registry domain is required (set app.images.main.registry.domain or global.registry.domain)" $regDomain) $ }}": { + "auth": "{{ `{{ printf "%s:%s" "` }}{{ $regUser }}{{ `" .password | b64enc }}` }}" } } } data: - secretKey: password remoteRef: - key: {{ .Values.app.images.main.registry.vaultPath }} - property: {{ .Values.app.images.main.registry.passwordVaultKey }} -{{- end }} \ No newline at end of file + key: {{ required "registry vaultPath is required (set app.images.main.registry.vaultPath or global.registry.vaultPath)" $regVaultPath }} + property: {{ required "registry passwordVaultKey is required (set app.images.main.registry.passwordVaultKey or global.registry.passwordVaultKey)" $regPasswordKey }} +{{- end }} diff --git a/charts/qtodo/templates/registry-seed-job.yaml b/charts/qtodo/templates/registry-seed-job.yaml new file mode 100644 index 00000000..68f4aa48 --- /dev/null +++ b/charts/qtodo/templates/registry-seed-job.yaml @@ -0,0 +1,202 @@ +{{- if .Values.app.seedImage.enabled }} +{{- $regDomain := .Values.global.registry.domain }} +{{- $regRepository := .Values.global.registry.repository }} +{{- $isEmbedded := and (hasKey .Values.global.registry "user") (eq (default "" .Values.global.registry.user) "_token") }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: registry-seed + namespace: {{ .Release.Namespace | default "qtodo" }} + annotations: + argocd.argoproj.io/sync-wave: "0" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Namespace | default "qtodo" }}-registry-seed + annotations: + argocd.argoproj.io/sync-wave: "0" 
+rules: +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "create"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["rolebindings"] + verbs: ["get", "create"] +- apiGroups: ["rbac.authorization.k8s.io"] + resources: ["clusterroles"] + verbs: ["bind"] + resourceNames: ["system:image-builder"] +- apiGroups: ["imageregistry.operator.openshift.io"] + resources: ["configs"] + verbs: ["get", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Namespace | default "qtodo" }}-registry-seed + annotations: + argocd.argoproj.io/sync-wave: "0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Namespace | default "qtodo" }}-registry-seed +subjects: +- kind: ServiceAccount + name: registry-seed + namespace: {{ .Release.Namespace | default "qtodo" }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: registry-seed-image + namespace: {{ .Release.Namespace | default "qtodo" }} + annotations: + argocd.argoproj.io/sync-wave: "5" + argocd.argoproj.io/hook: Sync + argocd.argoproj.io/hook-delete-policy: BeforeHookCreation,HookSucceeded +spec: + backoffLimit: 0 + activeDeadlineSeconds: 600 + template: + spec: + serviceAccountName: registry-seed + restartPolicy: Never +{{- if not $isEmbedded }} + volumes: + - name: registry-auth + secret: + secretName: {{ .Values.app.images.main.registry.secretName }} + optional: true +{{- end }} + containers: + - name: seed + image: {{ .Values.app.seedImage.image }} + env: + - name: SOURCE_IMAGE + value: {{ .Values.app.seedImage.source | quote }} + - name: TAG + value: {{ .Values.app.seedImage.tag | quote }} + - name: REGISTRY_DOMAIN + value: {{ tpl (required "global.registry.domain is required" $regDomain) $ | quote }} + - name: REGISTRY_REPOSITORY + value: {{ $regRepository | quote }} + - name: IS_EMBEDDED + value: {{ $isEmbedded | quote }} +{{- if not $isEmbedded }} + volumeMounts: + - name: registry-auth + mountPath: 
/var/run/secrets/registry + readOnly: true +{{- end }} + command: + - /bin/sh + - -c + - | + # Best-effort seed: this job must NEVER fail because a Sync hook + # failure triggers ArgoCD retry loops that block the entire app sync. + # The supply-chain pipeline is the authoritative image builder; + # this seed is only an optimisation for first-install UX. + seed_image() { + set -euo pipefail + APISERVER="https://kubernetes.default.svc" + SA_TOKEN="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + CACERT="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + + TARGET_IMAGE="${REGISTRY_DOMAIN}/${REGISTRY_REPOSITORY}:${TAG}" + + if [ "${IS_EMBEDDED}" = "true" ]; then + PUSH_REGISTRY="image-registry.openshift-image-registry.svc:5000" + PUSH_IMAGE="${PUSH_REGISTRY}/${REGISTRY_REPOSITORY}:${TAG}" + + IMAGE_NS=$(echo "${REGISTRY_REPOSITORY}" | cut -d/ -f1) + echo "Ensuring namespace '${IMAGE_NS}' exists..." + NS_BODY="{\"apiVersion\":\"v1\",\"kind\":\"Namespace\",\"metadata\":{\"name\":\"${IMAGE_NS}\"}}" + HTTP=$(curl -sS -o /dev/null -w "%{http_code}" --cacert "${CACERT}" \ + -H "Authorization: Bearer ${SA_TOKEN}" \ + -H "Content-Type: application/json" \ + -X POST -d "${NS_BODY}" \ + "${APISERVER}/api/v1/namespaces" 2>/dev/null) + case "${HTTP}" in + 20[0-9]|409) echo "Namespace ready (HTTP ${HTTP})." ;; + *) echo "WARNING: namespace create returned HTTP ${HTTP}, continuing..." ;; + esac + + echo "Granting system:image-builder to registry-seed SA..." 
+ RB_BODY="{\"apiVersion\":\"rbac.authorization.k8s.io/v1\",\"kind\":\"RoleBinding\",\"metadata\":{\"name\":\"seed-image-builder\",\"namespace\":\"${IMAGE_NS}\"},\"roleRef\":{\"apiGroup\":\"rbac.authorization.k8s.io\",\"kind\":\"ClusterRole\",\"name\":\"system:image-builder\"},\"subjects\":[{\"kind\":\"ServiceAccount\",\"name\":\"registry-seed\",\"namespace\":\"{{ .Release.Namespace | default "qtodo" }}\"}]}" + HTTP=$(curl -sS -o /dev/null -w "%{http_code}" --cacert "${CACERT}" \ + -H "Authorization: Bearer ${SA_TOKEN}" \ + -H "Content-Type: application/json" \ + -X POST -d "${RB_BODY}" \ + "${APISERVER}/apis/rbac.authorization.k8s.io/v1/namespaces/${IMAGE_NS}/rolebindings" 2>/dev/null) + case "${HTTP}" in + 20[0-9]|409) echo "RoleBinding ready (HTTP ${HTTP})." ;; + *) echo "WARNING: RoleBinding create returned HTTP ${HTTP}, continuing..." ;; + esac + + mkdir -p /tmp/auth + AUTH_TOKEN=$(echo -n "ignored:${SA_TOKEN}" | base64 -w0) + printf '{"auths":{"%s":{"auth":"%s"}}}' "${PUSH_REGISTRY}" "${AUTH_TOKEN}" > /tmp/auth/config.json + AUTH_ARGS="-a /tmp/auth/config.json" + else + PUSH_REGISTRY="${REGISTRY_DOMAIN}" + PUSH_IMAGE="${TARGET_IMAGE}" + + if [ -f /var/run/secrets/registry/.dockerconfigjson ]; then + AUTH_ARGS="-a /var/run/secrets/registry/.dockerconfigjson" + else + echo "INFO: Registry auth secret not yet available (expected on first install)." + echo "Skipping seed -- the supply-chain pipeline will push the image later." + return 0 + fi + fi + + echo "Waiting for registry at ${PUSH_REGISTRY} to become reachable..." 
+ MAX_WAIT=480 + WAITED=0 + while [ ${WAITED} -lt ${MAX_WAIT} ]; do + if [ "${IS_EMBEDDED}" = "true" ]; then + HTTP=$(curl -sk -o /dev/null -w "%{http_code}" \ + -H "Authorization: Bearer ${SA_TOKEN}" \ + "https://${PUSH_REGISTRY}/v2/" 2>/dev/null) + else + HTTP=$(curl -sk -o /dev/null -w "%{http_code}" "https://${PUSH_REGISTRY}/v2/" 2>/dev/null) + fi + if [ "${HTTP}" = "200" ] || [ "${HTTP}" = "401" ] || [ "${HTTP}" = "301" ]; then + echo "Registry is reachable (HTTP ${HTTP})." + break + fi + echo "Registry not ready (HTTP ${HTTP}), retrying in 15s... (${WAITED}/${MAX_WAIT}s)" + sleep 15 + WAITED=$((WAITED + 15)) + done + if [ ${WAITED} -ge ${MAX_WAIT} ]; then + echo "WARNING: Registry not reachable after ${MAX_WAIT}s." + return 1 + fi + + echo "Checking if image already exists at ${PUSH_IMAGE}..." + if oc image info "${PUSH_IMAGE}" --insecure=true 2>/dev/null; then + echo "Image already exists, skipping seed." + return 0 + fi + + echo "Mirroring ${SOURCE_IMAGE} -> ${PUSH_IMAGE}..." + oc image mirror "${SOURCE_IMAGE}" "${PUSH_IMAGE}" \ + --insecure=true --skip-missing=true \ + ${AUTH_ARGS} \ + --filter-by-os="linux/amd64" \ + --keep-manifest-list=false || return 1 + + echo "Seed image push complete." + } + + if seed_image; then + echo "SUCCESS: Seed image job finished." + else + echo "WARNING: Seed image job could not push the image (best-effort)." + echo "The supply-chain pipeline will build and push the image later." 
+ fi + exit 0 +{{- end }} diff --git a/charts/qtodo/values.yaml b/charts/qtodo/values.yaml index 90494f40..cd70b59b 100644 --- a/charts/qtodo/values.yaml +++ b/charts/qtodo/values.yaml @@ -4,6 +4,13 @@ global: secretStore: name: "vault-backend" kind: "ClusterSecretStore" + registry: + enabled: false + domain: "" + repository: "" + user: "" + vaultPath: "" + passwordVaultKey: "" # QTodo application configuration app: @@ -15,13 +22,16 @@ app: # Modified to Always to force a pull so we can test changes to the container image without requiring manual deletion of images or restarts of argo pullPolicy: Always registry: + # Set to true to create registry auth secret for image pulls + # (also enabled when global.registry.enabled=true) auth: false secretName: qtodo-registry-auth - user: quay-user - # domain: quay-registry-quay-quay-enterprise.apps.example.com - # Registry credentials - stored in quay path - vaultPath: secret/data/hub/infra/quay/quay-users - passwordVaultKey: quay-user-password + # Falls back to global.registry.user + user: "" + # domain: registry.example.com # REQUIRED when auth is enabled + # Vault path and key for registry password (set for your scenario) + vaultPath: "" + passwordVaultKey: "" spiffeHelper: name: registry.redhat.io/zero-trust-workload-identity-manager/spiffe-helper-rhel9 version: v0.10.0 @@ -73,6 +83,14 @@ app: # QTodo OIDC secret path (app-level isolation) vaultPath: "secret/data/apps/qtodo/qtodo-oidc-client" + # Seed image Job: mirrors the upstream qtodo image into the configured + # registry so the deployment can pull before the supply-chain pipeline runs. 
+ seedImage: + enabled: false + source: "quay.io/validatedpatterns/qtodo:latest" + tag: "latest" + image: "registry.redhat.io/openshift4/ose-tools-rhel9:latest" + # Truststore configuration for Java CA certificates (PKCS12 format) truststore: enabled: true diff --git a/charts/supply-chain/files/refresh_registry_token.sh b/charts/supply-chain/files/refresh_registry_token.sh new file mode 100755 index 00000000..a53831f4 --- /dev/null +++ b/charts/supply-chain/files/refresh_registry_token.sh @@ -0,0 +1,106 @@ +#!/bin/sh +set -eu + +VAULT_URL="${VAULT_URL:?VAULT_URL is required}" +VAULT_ROLE="${VAULT_ROLE:-supply-chain}" +VAULT_SECRET_PATH="${VAULT_SECRET_PATH:?VAULT_SECRET_PATH is required}" +VAULT_SECRET_KEY="${VAULT_SECRET_KEY:?VAULT_SECRET_KEY is required}" +SA_NAME="${SA_NAME:-pipeline}" +SA_NAMESPACE="${SA_NAMESPACE:?SA_NAMESPACE is required}" +TOKEN_DURATION="${TOKEN_DURATION:-172800}" +JWT_TOKEN_FILE="${JWT_TOKEN_FILE:-/svids/jwt.token}" +CA_CERT="${CA_CERT:-/run/secrets/kubernetes.io/serviceaccount/service-ca.crt}" + +APISERVER="https://kubernetes.default.svc" +SA_TOKEN="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" +CACERT="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + +log() { echo "[$(date '+%H:%M:%S')] $*"; } + +vault_curl() { + if [ -f "${CA_CERT}" ]; then + curl -sS --cacert "${CA_CERT}" "$@" + else + curl -sSk "$@" + fi +} + +log "Starting OpenShift registry token refresh" + +# 1. Read SPIFFE JWT for Vault authentication +if [ ! -f "${JWT_TOKEN_FILE}" ]; then + log "ERROR: JWT token file not found at ${JWT_TOKEN_FILE}" + exit 1 +fi +JWT="$(cat "${JWT_TOKEN_FILE}")" +log "Read SPIFFE JWT from ${JWT_TOKEN_FILE}" + +# 2. Authenticate to Vault using SPIFFE JWT (with retry for seed Job timing) +VAULT_MAX_RETRIES="${VAULT_MAX_RETRIES:-20}" +VAULT_RETRY_INTERVAL="${VAULT_RETRY_INTERVAL:-15}" +VAULT_TOKEN="" + +log "Authenticating to Vault at ${VAULT_URL} with role ${VAULT_ROLE}..." 
+attempt=0 +while [ "${attempt}" -lt "${VAULT_MAX_RETRIES}" ]; do + AUTH_RESP=$(vault_curl -X POST "${VAULT_URL}/v1/auth/jwt/login" \ + -H "Content-Type: application/json" \ + -d "{\"role\":\"${VAULT_ROLE}\",\"jwt\":\"${JWT}\"}" 2>&1) || true + + VAULT_TOKEN=$(echo "${AUTH_RESP}" | python3 -c "import sys,json; print(json.load(sys.stdin)['auth']['client_token'])" 2>/dev/null) || true + + if [ -n "${VAULT_TOKEN}" ]; then + break + fi + + attempt=$((attempt + 1)) + if [ "${attempt}" -lt "${VAULT_MAX_RETRIES}" ]; then + log "Vault not ready (attempt ${attempt}/${VAULT_MAX_RETRIES}). Retrying in ${VAULT_RETRY_INTERVAL}s..." + sleep "${VAULT_RETRY_INTERVAL}" + fi +done + +if [ -z "${VAULT_TOKEN}" ]; then + log "ERROR: Vault authentication failed after ${VAULT_MAX_RETRIES} attempts" + log "${AUTH_RESP}" + exit 1 +fi +log "Vault authentication successful" + +# 3. Create a fresh SA token via the Kubernetes TokenRequest API +log "Creating ${SA_NAME} SA token (duration: ${TOKEN_DURATION}s)..." +TOKEN_RESP=$(curl -sS --cacert "${CACERT}" \ + -X POST "${APISERVER}/api/v1/namespaces/${SA_NAMESPACE}/serviceaccounts/${SA_NAME}/token" \ + -H "Authorization: Bearer ${SA_TOKEN}" \ + -H "Content-Type: application/json" \ + -d "{\"apiVersion\":\"authentication.k8s.io/v1\",\"kind\":\"TokenRequest\",\"spec\":{\"expirationSeconds\":${TOKEN_DURATION}}}") + +NEW_TOKEN=$(echo "${TOKEN_RESP}" | python3 -c "import sys,json; print(json.load(sys.stdin)['status']['token'])" 2>/dev/null) || { + log "ERROR: TokenRequest API failed" + log "${TOKEN_RESP}" + exit 1 +} +log "SA token created successfully" + +# 4. Write the new token to Vault +log "Writing token to Vault at ${VAULT_SECRET_PATH}..." 
+WRITE_RESP=$(vault_curl -X POST "${VAULT_URL}/v1/${VAULT_SECRET_PATH}" \ + -H "X-Vault-Token: ${VAULT_TOKEN}" \ + -H "Content-Type: application/json" \ + -d "{\"data\":{\"${VAULT_SECRET_KEY}\":\"${NEW_TOKEN}\"}}") + +# Check for errors in the response +echo "${WRITE_RESP}" | python3 -c " +import sys, json +resp = json.load(sys.stdin) +if 'errors' in resp and resp['errors']: + print('ERROR: ' + str(resp['errors']), file=sys.stderr) + sys.exit(1) +" || { + log "ERROR: Failed to write token to Vault" + log "${WRITE_RESP}" + exit 1 +} + +log "Token successfully written to Vault at ${VAULT_SECRET_PATH}" +log "Registry token refresh complete" diff --git a/charts/supply-chain/templates/pipeline-qtodo.yaml b/charts/supply-chain/templates/pipeline-qtodo.yaml index 13ae2c8c..5c4b6385 100644 --- a/charts/supply-chain/templates/pipeline-qtodo.yaml +++ b/charts/supply-chain/templates/pipeline-qtodo.yaml @@ -25,7 +25,7 @@ spec: - name: image-target type: string description: qtodo image push destination (e.g. 
quay.io/ztvp/qtodo:latest) - default: {{ .Values.registry.domain | default (printf "quay-registry-quay-quay-enterprise.%s" .Values.global.hubClusterDomain) }}/{{ .Values.registry.org }}/{{ .Values.registry.repo }}:{{ .Values.qtodo.tag }} + default: {{ tpl (required "registry.domain (or global.registry.domain) is required" (.Values.registry.domain | default .Values.global.registry.domain)) $ }}/{{ .Values.registry.repository | default .Values.global.registry.repository }}:{{ .Values.qtodo.tag }} - name: image-tls-verify type: string description: Whether to verify TLS when pushing to the OCI registry @@ -308,4 +308,14 @@ spec: - name: oidc-identity value: $(params.oidc-identity) - name: oidc-issuer - value: $(params.oidc-issuer) \ No newline at end of file + value: $(params.oidc-issuer) + + finally: + - name: restart-qtodo + when: + - input: $(tasks.qtodo-verify-image.status) + operator: in + values: ["Succeeded"] + taskRef: + name: restart-qtodo + kind: Task \ No newline at end of file diff --git a/charts/supply-chain/templates/pipelinerun-qtodo.yaml b/charts/supply-chain/templates/pipelinerun-qtodo.yaml new file mode 100644 index 00000000..36b52d4b --- /dev/null +++ b/charts/supply-chain/templates/pipelinerun-qtodo.yaml @@ -0,0 +1,112 @@ +{{- if .Values.pipelinerun.enabled }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pipelinerun-launcher + namespace: {{ .Values.global.namespace }} + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: BeforeHookCreation +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pipelinerun-launcher + namespace: {{ .Values.global.namespace }} + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: BeforeHookCreation +rules: +- apiGroups: ["tekton.dev"] + resources: ["pipelineruns"] + verbs: ["create"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + resourceNames: [{{ .Values.registry.authSecretName | quote 
}}] +- apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get"] + resourceNames: ["qtodo-workspace-source"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipelinerun-launcher + namespace: {{ .Values.global.namespace }} + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: BeforeHookCreation +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipelinerun-launcher +subjects: +- kind: ServiceAccount + name: pipelinerun-launcher + namespace: {{ .Values.global.namespace }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: launch-qtodo-pipeline + namespace: {{ .Values.global.namespace }} + annotations: + argocd.argoproj.io/hook: PostSync + argocd.argoproj.io/hook-delete-policy: BeforeHookCreation +spec: + backoffLimit: 2 + activeDeadlineSeconds: 600 + template: + spec: + serviceAccountName: pipelinerun-launcher + restartPolicy: Never + containers: + - name: launcher + image: registry.redhat.io/openshift4/ose-tools-rhel9:latest + env: + - name: REGISTRY_DOMAIN + value: {{ tpl (default "" .Values.global.registry.domain) $ | quote }} + command: + - /bin/sh + - -ce + - | + if [ -n "${REGISTRY_DOMAIN}" ]; then + echo "Waiting for registry ${REGISTRY_DOMAIN} to become ready..." + MAX_WAIT=480 + WAITED=0 + while [ ${WAITED} -lt ${MAX_WAIT} ]; do + HTTP=$(curl -sk -o /dev/null -w "%{http_code}" "https://${REGISTRY_DOMAIN}/v2/" 2>/dev/null) + if [ "${HTTP}" = "200" ] || [ "${HTTP}" = "401" ] || [ "${HTTP}" = "301" ]; then + echo "Registry is ready (HTTP ${HTTP})." + break + fi + echo "Registry not ready (HTTP ${HTTP}), retrying in 15s... (${WAITED}/${MAX_WAIT}s)" + sleep 15 + WAITED=$((WAITED + 15)) + done + if [ ${WAITED} -ge ${MAX_WAIT} ]; then + echo "ERROR: Registry not ready after ${MAX_WAIT}s, launching pipeline anyway." 
+ fi + fi + + cat <<'MANIFEST' | oc create -f - + apiVersion: tekton.dev/v1beta1 + kind: PipelineRun + metadata: + generateName: qtodo-supply-chain- + namespace: {{ .Values.global.namespace }} + spec: + pipelineRef: + name: qtodo-supply-chain + workspaces: + - name: qtodo-source + persistentVolumeClaim: + claimName: qtodo-workspace-source + - name: registry-auth-config + secret: + secretName: {{ .Values.registry.authSecretName }} + MANIFEST + echo "PipelineRun created successfully." +{{- end }} diff --git a/charts/supply-chain/templates/quay/quay-user-job.yaml b/charts/supply-chain/templates/quay/quay-user-job.yaml index 417afcc7..5d97bd8f 100644 --- a/charts/supply-chain/templates/quay/quay-user-job.yaml +++ b/charts/supply-chain/templates/quay/quay-user-job.yaml @@ -19,9 +19,9 @@ spec: command: ["python3", "/app/create_user.py"] env: - name: QUAY_HOST - value: {{ .Values.registry.domain | default (printf "quay-registry-quay-quay-enterprise.%s" .Values.global.hubClusterDomain) }} + value: {{ tpl (.Values.registry.domain | default .Values.global.registry.domain | default (printf "quay-registry-quay-quay-enterprise.%s" .Values.global.hubClusterDomain)) $ }} - name: QUAY_ADMIN_USER - value: {{ .Values.registry.user }} + value: {{ .Values.registry.user | default .Values.global.registry.user }} - name: QUAY_ADMIN_PASSWORD valueFrom: secretKeyRef: diff --git a/charts/supply-chain/templates/rbac/pipeline-qtodo-restarter.yaml b/charts/supply-chain/templates/rbac/pipeline-qtodo-restarter.yaml new file mode 100644 index 00000000..5eafeefd --- /dev/null +++ b/charts/supply-chain/templates/rbac/pipeline-qtodo-restarter.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pipeline-qtodo-restarter + namespace: qtodo +rules: +- apiGroups: ["apps"] + resources: ["deployments"] + resourceNames: ["qtodo"] + verbs: ["get", "patch"] +- apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["list"] +- apiGroups: ["apps"] + resources: 
["replicasets"] + verbs: ["list", "get"] +- apiGroups: [""] + resources: ["pods"] + verbs: ["list", "get", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipeline-qtodo-restarter + namespace: qtodo +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pipeline-qtodo-restarter +subjects: +- kind: ServiceAccount + name: pipeline + namespace: {{ .Values.global.namespace }} diff --git a/charts/supply-chain/templates/rbac/registry-image-namespace.yaml b/charts/supply-chain/templates/rbac/registry-image-namespace.yaml new file mode 100644 index 00000000..6602279d --- /dev/null +++ b/charts/supply-chain/templates/rbac/registry-image-namespace.yaml @@ -0,0 +1,122 @@ +{{- if and (index .Values.registry "embeddedOpenShift") (index .Values.registry.embeddedOpenShift "ensureImageNamespaceRBAC") }} +{{- $repository := .Values.registry.repository | default .Values.global.registry.repository }} +{{- $imageNamespace := index (splitList "/" $repository) 0 }} +# When using the embedded OpenShift image registry, the pipeline pushes to a +# namespace that matches the first path component of registry.repository +# (e.g. "ztvp" from "ztvp/qtodo"). This ensures that namespace exists and the +# pipeline SA has system:image-builder so the push succeeds. 
+--- +apiVersion: v1 +kind: Namespace +metadata: + name: {{ $imageNamespace }} + annotations: + argocd.argoproj.io/sync-wave: "0" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pipeline-image-builder + namespace: {{ $imageNamespace }} + annotations: + argocd.argoproj.io/sync-wave: "0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:image-builder +subjects: + - kind: ServiceAccount + name: {{ .Values.pipelineServiceAccount }} + namespace: {{ .Values.global.namespace }} +--- +# Enable the default route on the embedded OpenShift image registry so that +# the pipeline can push and external clients can pull images via the route. +# Uses a Job because the imageregistry config is a cluster-singleton managed +# by the image-registry operator; declarative ownership would conflict. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Values.registry.embeddedOpenShift.routeEnablerServiceAccount }} + namespace: {{ .Values.global.namespace }} + annotations: + argocd.argoproj.io/sync-wave: "0" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Values.global.namespace }}-registry-route-enabler + annotations: + argocd.argoproj.io/sync-wave: "0" +rules: +- apiGroups: ["imageregistry.operator.openshift.io"] + resources: ["configs"] + verbs: ["get", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Values.global.namespace }}-registry-route-enabler + annotations: + argocd.argoproj.io/sync-wave: "0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Values.global.namespace }}-registry-route-enabler +subjects: +- kind: ServiceAccount + name: {{ .Values.registry.embeddedOpenShift.routeEnablerServiceAccount }} + namespace: {{ .Values.global.namespace }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: enable-registry-default-route + namespace: {{ .Values.global.namespace }} + 
annotations: + argocd.argoproj.io/sync-wave: "1" + argocd.argoproj.io/hook: Sync + argocd.argoproj.io/hook-delete-policy: HookSucceeded +spec: + backoffLimit: 3 + template: + spec: + serviceAccountName: {{ .Values.registry.embeddedOpenShift.routeEnablerServiceAccount }} + restartPolicy: Never + containers: + - name: enable-route + image: {{ .Values.images.ubi }} + command: + - /bin/sh + - -ce + - | + APISERVER="https://kubernetes.default.svc" + TOKEN="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" + CACERT="/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + RESOURCE_URL="${APISERVER}/apis/imageregistry.operator.openshift.io/v1/configs/cluster" + AUTH_HEADER="Authorization: Bearer ${TOKEN}" + + echo "Checking current defaultRoute status..." + BODY=$(curl -sS --cacert "${CACERT}" -H "${AUTH_HEADER}" "${RESOURCE_URL}") + rc=$?; if [ $rc -ne 0 ]; then echo "ERROR: GET failed (curl rc=${rc})"; exit 1; fi + + # Parse defaultRoute from JSON without jq/grep dependency + case "${BODY}" in + *'"defaultRoute":true'*) echo "Default route already enabled, nothing to do."; exit 0 ;; + esac + + echo "Enabling default route on embedded OpenShift image registry..." + RESP=$(curl -sS -w "\n%{http_code}" --cacert "${CACERT}" \ + -H "${AUTH_HEADER}" \ + -H "Content-Type: application/merge-patch+json" \ + -X PATCH -d '{"spec":{"defaultRoute":true}}' \ + "${RESOURCE_URL}") + HTTP_CODE=$(echo "${RESP}" | tail -1) + + if [ "${HTTP_CODE}" -ge 200 ] 2>/dev/null && [ "${HTTP_CODE}" -lt 300 ] 2>/dev/null; then + echo "Default route enabled successfully (HTTP ${HTTP_CODE})." + else + echo "ERROR: PATCH failed (HTTP ${HTTP_CODE})." 
+ echo "${RESP}" | head -5 + exit 1 + fi +{{- end }} diff --git a/charts/supply-chain/templates/rbac/registry-token-refresher.yaml b/charts/supply-chain/templates/rbac/registry-token-refresher.yaml new file mode 100644 index 00000000..40fd7e58 --- /dev/null +++ b/charts/supply-chain/templates/rbac/registry-token-refresher.yaml @@ -0,0 +1,225 @@ +{{- if and (index .Values.registry "embeddedOpenShift") (index .Values.registry.embeddedOpenShift "tokenRefresher") (index .Values.registry.embeddedOpenShift.tokenRefresher "enabled") }} +{{- $regVaultPath := .Values.registry.vaultPath | default .Values.global.registry.vaultPath }} +{{- $regPasswordKey := .Values.registry.passwordVaultKey | default .Values.global.registry.passwordVaultKey }} +# ============================================================ +# OpenShift Registry Token Refresher +# Periodically creates a fresh pipeline SA token and writes it +# to Vault so the ExternalSecret-based dockerconfigjson stays +# valid for the embedded OpenShift image registry. 
+# ============================================================ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: registry-token-refresher-script + namespace: {{ .Values.global.namespace }} +data: + refresh_registry_token.sh: | +{{- .Files.Get "files/refresh_registry_token.sh" | nindent 4 }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: registry-token-refresher-spiffe-config + namespace: {{ .Values.global.namespace }} +data: + helper.conf: |- + agent_address = "{{ .Values.spire.endpointSocketPath }}" + cmd = "" + cmd_args = "" + cert_dir = "/svids" + renew_signal = "" + svid_file_name = "svid.pem" + svid_key_file_name = "svid_key.pem" + svid_bundle_file_name = "svid_bundle.pem" + jwt_svids = [{jwt_audience="supply-chain", jwt_svid_file_name="jwt.token"}] + jwt_bundle_file_name = "jwt_bundle.json" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: registry-token-refresher + namespace: {{ .Values.global.namespace }} +rules: +- apiGroups: [""] + resources: ["serviceaccounts/token"] + resourceNames: [{{ .Values.pipelineServiceAccount | quote }}] + verbs: ["create"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: registry-token-refresher + namespace: {{ .Values.global.namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: registry-token-refresher +subjects: +- kind: ServiceAccount + name: {{ .Values.pipelineServiceAccount }} + namespace: {{ .Values.global.namespace }} +--- +apiVersion: batch/v1 +kind: CronJob +metadata: + name: registry-token-refresher + namespace: {{ .Values.global.namespace }} +spec: + schedule: {{ .Values.registry.embeddedOpenShift.tokenRefresher.schedule | quote }} + concurrencyPolicy: Forbid + jobTemplate: + spec: + backoffLimit: 3 + template: + spec: + serviceAccountName: {{ .Values.pipelineServiceAccount }} + restartPolicy: Never + initContainers: + - name: fetch-spiffe-jwt + image: {{ .Values.images.spiffeHelper }} + imagePullPolicy: 
IfNotPresent + args: + - '-config' + - /etc/helper.conf + - '-daemon-mode=false' + volumeMounts: + - name: spiffe-helper-config + readOnly: true + mountPath: /etc/helper.conf + subPath: helper.conf + - name: spiffe-workload-api + readOnly: true + mountPath: /spiffe-workload-api + - name: svids + mountPath: /svids + containers: + - name: refresh-token + image: {{ .Values.images.ubi }} + command: ["sh", "/app/refresh_registry_token.sh"] + env: + - name: VAULT_URL + value: https://vault.vault.svc.cluster.local:8200 + - name: VAULT_ROLE + value: supply-chain + - name: VAULT_SECRET_PATH + value: {{ required "registry vaultPath is required" $regVaultPath }} + - name: VAULT_SECRET_KEY + value: {{ required "registry passwordVaultKey is required" $regPasswordKey }} + - name: SA_NAME + value: {{ .Values.pipelineServiceAccount }} + - name: SA_NAMESPACE + value: {{ .Values.global.namespace }} + - name: TOKEN_DURATION + value: {{ .Values.registry.embeddedOpenShift.tokenRefresher.tokenDuration | quote }} + - name: JWT_TOKEN_FILE + value: /svids/jwt.token + - name: CA_CERT + value: /run/secrets/kubernetes.io/serviceaccount/service-ca.crt + volumeMounts: + - name: svids + readOnly: true + mountPath: /svids + - name: script-volume + readOnly: true + mountPath: /app + volumes: + - name: spiffe-workload-api + csi: + driver: csi.spiffe.io + readOnly: true + - name: spiffe-helper-config + configMap: + name: registry-token-refresher-spiffe-config + defaultMode: 420 + - name: svids + emptyDir: {} + - name: script-volume + configMap: + name: registry-token-refresher-script + defaultMode: 0755 +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: registry-token-refresher-seed + namespace: {{ .Values.global.namespace }} + annotations: + # Run after wave 0 (ConfigMaps, RBAC) and wave 1 (enable-registry-default-route hook), + # but before the qtodo-registry-auth ExternalSecret at wave 15. 
+ argocd.argoproj.io/sync-wave: "10" + argocd.argoproj.io/hook: Sync + # HookSucceeded: remove completed Job so Argo does not sit on a stale object; BeforeHookCreation: clean retry/sync. + argocd.argoproj.io/hook-delete-policy: HookSucceeded,BeforeHookCreation +spec: + backoffLimit: 3 + # Avoid an indefinite sync if Vault/SPIFFE never become ready (Argo waits for hook completion). + activeDeadlineSeconds: 900 + template: + spec: + serviceAccountName: {{ .Values.pipelineServiceAccount }} + restartPolicy: Never + initContainers: + - name: fetch-spiffe-jwt + image: {{ .Values.images.spiffeHelper }} + imagePullPolicy: IfNotPresent + args: + - '-config' + - /etc/helper.conf + - '-daemon-mode=false' + volumeMounts: + - name: spiffe-helper-config + readOnly: true + mountPath: /etc/helper.conf + subPath: helper.conf + - name: spiffe-workload-api + readOnly: true + mountPath: /spiffe-workload-api + - name: svids + mountPath: /svids + containers: + - name: refresh-token + image: {{ .Values.images.ubi }} + command: ["sh", "/app/refresh_registry_token.sh"] + env: + - name: VAULT_URL + value: https://vault.vault.svc.cluster.local:8200 + - name: VAULT_ROLE + value: supply-chain + - name: VAULT_SECRET_PATH + value: {{ required "registry vaultPath is required" $regVaultPath }} + - name: VAULT_SECRET_KEY + value: {{ required "registry passwordVaultKey is required" $regPasswordKey }} + - name: SA_NAME + value: {{ .Values.pipelineServiceAccount }} + - name: SA_NAMESPACE + value: {{ .Values.global.namespace }} + - name: TOKEN_DURATION + value: {{ .Values.registry.embeddedOpenShift.tokenRefresher.tokenDuration | quote }} + - name: JWT_TOKEN_FILE + value: /svids/jwt.token + - name: CA_CERT + value: /run/secrets/kubernetes.io/serviceaccount/service-ca.crt + volumeMounts: + - name: svids + readOnly: true + mountPath: /svids + - name: script-volume + readOnly: true + mountPath: /app + volumes: + - name: spiffe-workload-api + csi: + driver: csi.spiffe.io + readOnly: true + - name: 
spiffe-helper-config + configMap: + name: registry-token-refresher-spiffe-config + defaultMode: 420 + - name: svids + emptyDir: {} + - name: script-volume + configMap: + name: registry-token-refresher-script + defaultMode: 0755 +{{- end }} diff --git a/charts/supply-chain/templates/secrets/qtodo-quay-pass.yaml b/charts/supply-chain/templates/secrets/qtodo-quay-pass.yaml new file mode 100644 index 00000000..22a89ee3 --- /dev/null +++ b/charts/supply-chain/templates/secrets/qtodo-quay-pass.yaml @@ -0,0 +1,31 @@ +{{/* + Quay User Provisioner Secret + Purpose: Provides password for the Quay user provisioner job to create/update users in built-in Quay + Used by: quay-user-job.yaml (CronJob that provisions Quay users) + Only created when: quay.enabled=true (built-in Quay registry) + Uses unified registry.vaultPath and registry.passwordVaultKey +*/}} +{{- if eq .Values.quay.enabled true }} +--- +apiVersion: "external-secrets.io/v1beta1" +kind: ExternalSecret +metadata: + name: qtodo-quay-password + namespace: {{ .Release.Namespace | default .Values.global.namespace }} +spec: + refreshInterval: 15s + secretStoreRef: + name: {{ .Values.global.secretStore.name }} + kind: {{ .Values.global.secretStore.kind }} + target: + name: qtodo-quay-password + template: + type: Opaque + data: + password: "{{ `{{ .password }}` }}" + data: + - secretKey: password + remoteRef: + key: {{ .Values.registry.vaultPath | default .Values.global.registry.vaultPath }} + property: {{ .Values.registry.passwordVaultKey | default .Values.global.registry.passwordVaultKey }} +{{- end }} diff --git a/charts/supply-chain/templates/secrets/qtodo-registry-auth.yaml b/charts/supply-chain/templates/secrets/qtodo-registry-auth.yaml index 416e8020..6773fb99 100644 --- a/charts/supply-chain/templates/secrets/qtodo-registry-auth.yaml +++ b/charts/supply-chain/templates/secrets/qtodo-registry-auth.yaml @@ -1,29 +1,46 @@ +{{/* + Pipeline Registry Auth Secret + Purpose: Provides dockerconfigjson for pipeline to 
push/pull images + Used by: Tekton pipeline tasks (build-image, sign-image, verify-image) + Created when: registry.enabled=true + Registry-agnostic: works for built-in Quay, BYO (quay.io, ghcr.io), or embedded OpenShift. + Set registry.domain, registry.vaultPath, and registry.passwordVaultKey for your scenario. +*/}} +{{- $regEnabled := or .Values.registry.enabled .Values.global.registry.enabled }} +{{- $regDomain := .Values.registry.domain | default .Values.global.registry.domain }} +{{- $regUser := .Values.registry.user | default .Values.global.registry.user }} +{{- $regVaultPath := .Values.registry.vaultPath | default .Values.global.registry.vaultPath }} +{{- $regPasswordKey := .Values.registry.passwordVaultKey | default .Values.global.registry.passwordVaultKey }} +{{- if $regEnabled }} --- apiVersion: "external-secrets.io/v1beta1" kind: ExternalSecret metadata: - name: qtodo-registry-auth + name: {{ .Values.registry.authSecretName }} namespace: {{ .Release.Namespace | default .Values.global.namespace }} + annotations: + argocd.argoproj.io/sync-wave: "15" spec: refreshInterval: 15s secretStoreRef: name: {{ .Values.global.secretStore.name }} kind: {{ .Values.global.secretStore.kind }} target: - name: qtodo-registry-auth + name: {{ .Values.registry.authSecretName }} template: type: kubernetes.io/dockerconfigjson data: .dockerconfigjson: | { "auths": { - "{{ .Values.registry.domain | default (printf "quay-registry-quay-quay-enterprise.%s" .Values.global.hubClusterDomain) }}": { - "auth": "{{ `{{ printf "%s:%s" "` }}{{ .Values.registry.user }}{{ `" .password | b64enc }}` }}" + "{{ tpl (required "registry.domain (or global.registry.domain) is required" $regDomain) $ }}": { + "auth": "{{ `{{ printf "%s:%s" "` }}{{ $regUser }}{{ `" .password | b64enc }}` }}" } } } data: - secretKey: password remoteRef: - key: {{ .Values.registry.vaultPath }} - property: {{ .Values.registry.passwordVaultKey }} \ No newline at end of file + key: {{ required "registry.vaultPath (or 
global.registry.vaultPath) is required" $regVaultPath }} + property: {{ required "registry.passwordVaultKey (or global.registry.passwordVaultKey) is required" $regPasswordKey }} +{{- end }} diff --git a/charts/supply-chain/templates/secrets/qtodo-registry-pass.yaml b/charts/supply-chain/templates/secrets/qtodo-registry-pass.yaml deleted file mode 100644 index 65406f8d..00000000 --- a/charts/supply-chain/templates/secrets/qtodo-registry-pass.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if eq .Values.quay.enabled true }} ---- -apiVersion: "external-secrets.io/v1beta1" -kind: ExternalSecret -metadata: - name: qtodo-quay-password - namespace: {{ .Release.Namespace | default .Values.global.namespace }} -spec: - refreshInterval: 15s - secretStoreRef: - name: {{ .Values.global.secretStore.name }} - kind: {{ .Values.global.secretStore.kind }} - target: - name: qtodo-quay-password - template: - type: Opaque - data: - password: "{{ `{{ .password }}` }}" - data: - - secretKey: password - remoteRef: - key: {{ .Values.registry.vaultPath }} - property: {{ .Values.registry.passwordVaultKey }} -{{- end }} \ No newline at end of file diff --git a/charts/supply-chain/templates/tasks/restart-qtodo.yaml b/charts/supply-chain/templates/tasks/restart-qtodo.yaml new file mode 100644 index 00000000..9fe23387 --- /dev/null +++ b/charts/supply-chain/templates/tasks/restart-qtodo.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: restart-qtodo + namespace: {{ .Values.global.namespace }} +spec: + description: Restart the qtodo deployment to pick up the latest built image. + steps: + - name: restart + image: registry.redhat.io/openshift4/ose-tools-rhel9:latest + resources: {} + script: | + #!/usr/bin/env bash + set -euo pipefail + if ! oc get deployment/qtodo -n qtodo >/dev/null 2>&1; then + echo "qtodo deployment not found in namespace qtodo -- skipping restart." + exit 0 + fi + echo "Restarting qtodo deployment to pull the latest image..." 
+ oc rollout restart deployment/qtodo -n qtodo + echo "Waiting for rollout to complete..." + oc rollout status deployment/qtodo -n qtodo --timeout=120s + echo "qtodo deployment restarted successfully." diff --git a/charts/supply-chain/values.yaml b/charts/supply-chain/values.yaml index 4a54d048..fb8b3170 100644 --- a/charts/supply-chain/values.yaml +++ b/charts/supply-chain/values.yaml @@ -5,6 +5,13 @@ global: secretStore: name: "vault-backend" kind: "ClusterSecretStore" + registry: + enabled: false + domain: "" + repository: "" + user: "" + vaultPath: "" + passwordVaultKey: "" rhtas: enabled: false @@ -26,26 +33,80 @@ qtodo: buildCmd: "./mvnw -s settings.xml package -DskipTests -Dquarkus.package.jar.type=uber-jar" containerfile: "./Containerfile" -# quay registry configuration -# used to create a new user in quay. Generic registry configuration is below. +# =========================================================================== +# QUAY USER PROVISIONER (only for built-in Quay registry) +# When enabled, runs a CronJob that provisions users in the built-in Quay instance. +# This is Quay-specific and not needed for BYO or embedded OpenShift registries. +# =========================================================================== quay: - enabled: true + enabled: false email: "quay-user@example.com" job: image: registry.access.redhat.com/ubi9/ubi:9.7-1764794285 schedule: "*/5 * * * *" -# container registry configuration +# =========================================================================== +# REGISTRY CONFIGURATION (option-agnostic) +# Works for all registry types: built-in Quay, BYO (quay.io, ghcr.io, etc.), +# or embedded OpenShift image registry. Set the values for your scenario. +# +# Scenario-specific values (set in values-hub.yaml overrides): +# Built-in Quay: +# domain: quay-registry-quay-quay-enterprise.apps. 
+# vaultPath: secret/data/hub/infra/quay/quay-users +# passwordVaultKey: quay-user-password +# BYO (quay.io, ghcr.io, etc.): +# domain: quay.io (or your registry hostname) +# vaultPath: secret/data/hub/infra/registry/registry-user +# passwordVaultKey: registry-password +# Embedded OpenShift: +# domain: default-route-openshift-image-registry.apps. +# vaultPath: secret/data/hub/infra/registry/registry-user +# passwordVaultKey: registry-password +# embeddedOpenShift.ensureImageNamespaceRBAC: true +# =========================================================================== registry: - # Commented to generate it dynamically - # domain: "quay-registry-quay-quay-enterprise.hub.example.com" - org: "ztvp" - repo: "qtodo" + # Set to true to create the registry auth secret (dockerconfigjson) + enabled: false + # Registry hostname (REQUIRED when enabled) + domain: "" + # Repository path within the registry, e.g. "ztvp/qtodo" + # (falls back to global.registry.repository) + repository: "" + # Whether to verify TLS when pushing to the registry tlsVerify: "true" - user: "quay-user" - passwordVaultKey: "quay-user-password" - # Infrastructure secrets - stored in quay path - vaultPath: "secret/data/hub/infra/quay/quay-users" + # Registry username (falls back to global.registry.user) + user: "" + # Vault path to the secret containing the registry password + vaultPath: "" + # Key within the Vault secret that holds the password + passwordVaultKey: "" + # Secret name for registry auth (dockerconfigjson) + authSecretName: "qtodo-registry-auth" + # Embedded OpenShift registry only: create image namespace and grant + # pipeline SA system:image-builder so the pipeline can push. Set to true only when + # using the in-cluster OpenShift image registry; leave false for other registries. 
+ embeddedOpenShift: + ensureImageNamespaceRBAC: false + # Service account name for the route-enabler Job + routeEnablerServiceAccount: "registry-route-enabler" + tokenRefresher: + enabled: false + schedule: "0 */6 * * *" + tokenDuration: "172800" + +# Container images used by Jobs and CronJobs (externalized for overridability) +images: + ubi: "registry.access.redhat.com/ubi9/ubi:9.7-1764794285" + spiffeHelper: "registry.redhat.io/zero-trust-workload-identity-manager/spiffe-helper-rhel9:v0.10.0" + +# Service account name used by pipeline Jobs (CronJob, seed Job) +pipelineServiceAccount: "pipeline" + +# pipeline run configuration +pipelinerun: + # Set to true to automatically trigger a pipeline run on ArgoCD sync + enabled: false # spire configuration spire: diff --git a/charts/ztvp-certificates/files/extract-certificates.sh.tpl b/charts/ztvp-certificates/files/extract-certificates.sh.tpl index e54bab1b..ebd85f89 100644 --- a/charts/ztvp-certificates/files/extract-certificates.sh.tpl +++ b/charts/ztvp-certificates/files/extract-certificates.sh.tpl @@ -398,7 +398,79 @@ fi {{- end }} # =================================================================== -# PHASE 9: Automatic Rollout (if enabled) +# PHASE 9: Configure Node-Level Image Pull Trust (if enabled) +# Creates a ConfigMap with registry-hostname keys containing the ingress CA, +# then patches image.config.openshift.io/cluster to reference it. +# This allows kubelet to pull images from registries behind the cluster ingress +# (e.g. built-in Quay) without "x509: certificate signed by unknown authority". +# =================================================================== + +{{- if .Values.imagePullTrust.enabled }} +{{- if .Values.imagePullTrust.registries }} +log "Configuring node-level image pull trust" + +if [[ "$INGRESS_CA_FOUND" != "true" ]]; then + error "imagePullTrust is enabled but no ingress CA was extracted. Cannot configure image pull trust." 
+ error "Ensure autoDetect is true or provide a custom ingress CA source." + exit 1 +fi + +# Build the ConfigMap data with registry hostnames as keys +# Each key is a registry hostname, value is the ingress CA PEM +REGISTRY_CM_DATA="" +{{- range .Values.imagePullTrust.registries }} +log "Adding registry trust: {{ tpl . $ }}" +{{- end }} + +log "Creating ConfigMap: {{ .Values.global.namespace }}/{{ .Values.imagePullTrust.configMapName }}" + +# Combine all ingress CA files into one PEM for registry trust +COMBINED_INGRESS_CA="${TEMP_DIR}/combined-ingress-ca.pem" +> "${COMBINED_INGRESS_CA}" +for f in "${TEMP_DIR}"/ingress-ca-*.crt; do + [[ -f "$f" ]] || continue + cat "$f" >> "${COMBINED_INGRESS_CA}" + echo "" >> "${COMBINED_INGRESS_CA}" +done + +# Create the ConfigMap with registry hostnames as keys +cat <<'CMEOF' > "${TEMP_DIR}/registry-cas-cm.yaml" +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.imagePullTrust.configMapName }} + namespace: {{ .Values.global.namespace }} + labels: + app.kubernetes.io/name: ztvp-certificates + app.kubernetes.io/component: image-pull-trust + app.kubernetes.io/managed-by: ztvp-certificate-manager +data: {} +CMEOF + +oc apply -f "${TEMP_DIR}/registry-cas-cm.yaml" + +# Patch each registry hostname as a key with the ingress CA PEM +{{- range .Values.imagePullTrust.registries }} +log "Patching ConfigMap key: {{ tpl . $ }}" +oc create configmap {{ $.Values.imagePullTrust.configMapName }} \ + -n {{ $.Values.global.namespace }} \ + --from-file="{{ tpl . 
$ }}=${COMBINED_INGRESS_CA}" \ + --dry-run=client -o yaml | oc apply -f - +{{- end }} + +# Patch image.config.openshift.io/cluster to reference the ConfigMap +log "Patching image.config.openshift.io/cluster additionalTrustedCA" +oc patch image.config.openshift.io/cluster --type merge \ + -p "{\"spec\":{\"additionalTrustedCA\":{\"name\":\"{{ .Values.imagePullTrust.configMapName }}\"}}}" + +log "Node-level image pull trust configured successfully" +log "Note: MCO will roll this out to nodes (may take a few minutes)" + +{{- end }} +{{- end }} + +# =================================================================== +# PHASE 10: Automatic Rollout (if enabled) # =================================================================== {{- if .Values.rollout.enabled }} diff --git a/charts/ztvp-certificates/templates/rbac.yaml b/charts/ztvp-certificates/templates/rbac.yaml index e2ce3cbc..5ca51fe4 100644 --- a/charts/ztvp-certificates/templates/rbac.yaml +++ b/charts/ztvp-certificates/templates/rbac.yaml @@ -84,6 +84,39 @@ subjects: - kind: ServiceAccount name: {{ include "ztvp-certificates.serviceAccountName" . }} namespace: {{ .Values.global.namespace }} +{{- if .Values.imagePullTrust.enabled }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "ztvp-certificates.fullname" . }}-image-config + annotations: + argocd.argoproj.io/sync-wave: "-9" + labels: + {{- include "ztvp-certificates.labels" . | nindent 4 }} +rules: +# Patch image.config.openshift.io/cluster to set additionalTrustedCA +- apiGroups: ["config.openshift.io"] + resources: ["images"] + verbs: ["get", "patch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "ztvp-certificates.fullname" . }}-image-config + annotations: + argocd.argoproj.io/sync-wave: "-9" + labels: + {{- include "ztvp-certificates.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "ztvp-certificates.fullname" . }}-image-config +subjects: +- kind: ServiceAccount + name: {{ include "ztvp-certificates.serviceAccountName" . }} + namespace: {{ .Values.global.namespace }} +{{- end }} {{- if .Values.rollout.enabled }} --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/charts/ztvp-certificates/values.yaml b/charts/ztvp-certificates/values.yaml index 6b45fd74..7fe49813 100644 --- a/charts/ztvp-certificates/values.yaml +++ b/charts/ztvp-certificates/values.yaml @@ -185,6 +185,25 @@ distribution: # Requires: ManagedClusterSetBinding in the namespace method: "acm-policy" +# Node-level image pull trust for kubelet +# Configures image.config.openshift.io/cluster additionalTrustedCA so that +# kubelet can pull images from registries behind the cluster's ingress (e.g. +# built-in Quay). Without this, kubelet image pulls fail with +# "x509: certificate signed by unknown authority" when the ingress uses a +# self-signed or cluster-internal CA. +imagePullTrust: + # Set to true to create the registry-CA ConfigMap and patch image.config + enabled: false + # ConfigMap name created in openshift-config for image.config additionalTrustedCA + configMapName: ztvp-registry-cas + # Registry hostnames that need the ingress CA for image pulls. + # Each becomes a key in the ConfigMap with the ingress CA as the value. + # Use {{ .Values.global.hubClusterDomain }} in values-hub.yaml overrides. 
+ registries: [] + # Example (built-in Quay): + # registries: + # - quay-registry-quay-quay-enterprise.apps.example.com + # Debugging options debug: # Enable verbose logging in extraction job diff --git a/common/scripts/vault-utils.sh b/common/scripts/vault-utils.sh index 42ef0c31..a72f1343 100755 --- a/common/scripts/vault-utils.sh +++ b/common/scripts/vault-utils.sh @@ -32,9 +32,16 @@ EXTRA_VARS_FILE=$(mktemp) trap "rm -f ${EXTRA_VARS_FILE}" EXIT if [ "$(yq ".clusterGroup.applications.vault.jwt.enabled // \"false\"" "${MAIN_CLUSTERGROUP_FILE}")" == "true" ]; then - OCP_DOMAIN="$(oc get dns cluster -o jsonpath='{.spec.baseDomain}')" - OIDC_DISCOVERY_URL="$(yq ".clusterGroup.applications.vault.jwt.oidcDiscoveryUrl" "${MAIN_CLUSTERGROUP_FILE}" | sed "s/{{ \$.Values.global.clusterDomain }}/${OCP_DOMAIN}/g")" - JWT_ROLES="$(yq -o json ".clusterGroup.applications.vault.jwt.roles" "${MAIN_CLUSTERGROUP_FILE}" | sed "s/{{ \$.Values.global.clusterDomain }}/${OCP_DOMAIN}/g")" + OPENSHIFT_DOMAIN="$(oc get dns cluster -o jsonpath='{.spec.baseDomain}')" + GLOBAL_PATTERN="$(yq -r '.global.pattern // ""' "${PATTERNPATH}/values-global.yaml")" + GLOBAL_PATTERN="${GLOBAL_PATTERN:-${PATTERN_NAME}}" + # Replace Helm-style placeholders so Ansible/Jinja2 never sees "{{ $.Values... }}" (invalid Jinja2). 
+ _subst_vault_yaml() { + sed -e "s/{{ \$.Values.global.clusterDomain }}/${OPENSHIFT_DOMAIN}/g" \ + -e "s/{{ \$.Values.global.pattern }}/${GLOBAL_PATTERN}/g" + } + OIDC_DISCOVERY_URL="$(yq ".clusterGroup.applications.vault.jwt.oidcDiscoveryUrl" "${MAIN_CLUSTERGROUP_FILE}" | _subst_vault_yaml)" + JWT_ROLES="$(yq -o json ".clusterGroup.applications.vault.jwt.roles" "${MAIN_CLUSTERGROUP_FILE}" | _subst_vault_yaml)" # Extract JWT policies (policies ending in -jwt-secret) JWT_POLICIES="$(yq -o json ".clusterGroup.applications.vault.policies" "${MAIN_CLUSTERGROUP_FILE}" | jq '[.[] | select(.name | test("-jwt-secret$"))]')" diff --git a/docs/SYNC-WAVE-INVENTORY.md b/docs/SYNC-WAVE-INVENTORY.md index d0708d5e..fa7a57c1 100644 --- a/docs/SYNC-WAVE-INVENTORY.md +++ b/docs/SYNC-WAVE-INVENTORY.md @@ -46,7 +46,9 @@ Every sync-wave in the repository, in order. **App** = hub-level Argo CD Applica | 36 | └ keycloak | chart | keycloak.yaml (Keycloak CR) | | 36 | └ quay-registry | chart | object-bucket-claim | | 36 | └ acs-central | chart | admin-password-secret, central-htpasswd-external-secret, keycloak-client-secret-external-secret | -| 36 | └ qtodo | chart | truststore-secret-external-secret | +| 36 | └ qtodo | chart | truststore-secret-external-secret, registry-external-secret | +| 38+0 | └ qtodo | chart | registry-seed SA, ClusterRole, ClusterRoleBinding | +| 38+5 | └ qtodo | chart (hook) | registry-seed-image (Sync hook Job -- mirrors upstream image to configured registry) | | 37 | └ quay-registry | chart | quay-s3-setup-serviceaccount (5 resources) | | 37 | └ acs-central | chart | create-htpasswd-field (Job) | | 38 | qtodo | **App** | | @@ -69,6 +71,10 @@ Every sync-wave in the repository, in order. 
**App** = hub-level Argo CD Applica | 46 | └ acs-secured-cluster | chart | secured-cluster-cr | | 46 | └ rhtas-operator | chart | securesign | | 48 | supply-chain | **App** | | +| 48+0 | └ supply-chain | chart | registry-image-namespace (Namespace, RBAC), pipeline-sa, tasks (incl. restart-qtodo), secrets (quay-pass, rhtpa-pass), quay-user, rhtas/rhtpa-config, pipeline-qtodo-restarter (Role+RoleBinding in qtodo ns) | +| 48+1 | └ supply-chain | chart (hook) | enable-registry-default-route (Sync hook Job) | +| 48+10 | └ supply-chain | chart (hook) | registry-token-refresher-seed (Sync hook Job — writes SA token to Vault) | +| 48+15 | └ supply-chain | chart | qtodo-registry-auth ExternalSecret (reads token from Vault) | | 49 | └ rhtpa-operator | chart | spiffe-helper-config | | 51 | acs-policies | **App** | After ACS Central + Secured Cluster | | 51 | └ rhtpa-operator | chart | trusted-profile-analyzer (supporting objects) | @@ -226,7 +232,10 @@ Charts marked **(external)** have been externalized to standalone repositories m | Resource | Old | Current | | --- | ---: | ---: | +| registry-seed-job.yaml (SA, ClusterRole, ClusterRoleBinding) | --- | 0 | +| registry-seed-job.yaml (Sync hook Job) | --- | 5 | | truststore-secret-external-secret.yaml | 5 | 36 | +| registry-external-secret.yaml | --- | 36 | | postgresql-statefulset.yaml | 10 | 41 | | postgresql-service.yaml | 10 | 41 | | qtodo-truststore-config.yaml | 10 | 41 | @@ -235,9 +244,17 @@ Charts marked **(external)** have been externalized to standalone repositories m ### supply-chain (`charts/supply-chain/templates/`) — App wave: 48 -| Resource | Old | Current | -| --- | ---: | ---: | -| workspaces.yaml | 20 | 51 | +Resources without an explicit sync-wave default to wave 0. These include: pipeline-sa, pipeline-qtodo, tasks/* (incl. restart-qtodo), secrets/qtodo-quay-pass, secrets/qtodo-rhtpa-pass, rhtas-config, rhtpa-config, quay/quay-user-cm, quay/quay-user-job, pipeline-qtodo-restarter (Role+RoleBinding). 
+ +| Resource | Old | Current | Notes | +| --- | ---: | ---: | --- | +| registry-image-namespace.yaml (Namespace, RoleBinding, SA, ClusterRole, ClusterRoleBinding) | — | 0 | Foundation RBAC for embedded OpenShift registry | +| enable-registry-default-route (Sync hook Job) | — | 1 | Exposes the OpenShift image registry route | +| registry-token-refresher-seed (Sync hook Job) | — | 10 | Writes initial SA token to Vault | +| qtodo-registry-auth ExternalSecret | — | 15 | Reads registry token from Vault; must run after seed Job | +| workspaces.yaml | 20 | 51 | Pipeline PVCs | +| pipelinerun-qtodo.yaml (PostSync hook Job + RBAC) | — | — | Job wraps `oc create` of PipelineRun (PipelineRun is excluded from ArgoCD tracking) | +| pipeline-qtodo.yaml `finally` section | — | — | restart-qtodo Task runs after successful verify-image | ### docs/DEVELOPMENT.md (example snippet, not deployed) diff --git a/docs/supply-chain.md b/docs/supply-chain.md index d2459fad..553dab5d 100644 --- a/docs/supply-chain.md +++ b/docs/supply-chain.md @@ -20,14 +20,193 @@ In our demo, we will use a number of additional ZTVP components. These component * [Multicloud Object Gateway](https://docs.redhat.com/en/documentation/red_hat_openshift_container_storage/4.8/html/managing_hybrid_and_multicloud_resources/index) is a data service for OpenShift that provides an S3-compatible object storage. In our case, this component is necessary to provide a storage system to Quay. * [Red Hat OpenShift Pipelines](https://docs.redhat.com/en/documentation/red_hat_openshift_pipelines/1.20) is a cloud-native CI/CD solution built on the Tekton framework. We will use this product to automate our secure supply chain process, but you could use your own CI/CD solution if one exists. -## Automatic approach +## Bring Your Own (BYO) Container Registry -To automate the application building and certifying process, we will use _Red Hat OpenShift Pipelines_. +By default, ZTVP deploys a built-in Red Hat Quay registry. 
However, you can use your own container registry (e.g., quay.io, Docker Hub, GitHub Container Registry, or a private registry) instead. -ZTVP will create a `Pipeline` in our cluster called **qtodo-supply-chain** that will orchestrate the various tasks necessary to build the application from its source code, generate a container image, and publish the resulting image to the defined OCI registry. Within the pipeline, an SBOM containing the build's contents will be generated, binaries and the build attestation will be signed, and the validity of those signatures will be verified. +### Configuration Steps + +1. **Disable built-in Quay registry** (optional - if not using Quay): Comment out the Quay-related applications in `values-hub.yaml`: `quay-enterprise` namespace, `quay-operator` subscription, and `quay-registry` application. + +2. **Configure registry credentials in Vault** (**BYO registry only**): Per VP rule, add your registry credentials to `~/values-secrets.yaml` (or `~/values-secret.yaml` / `~/values-secret-layered-zero-trust.yaml` per VP lookup order): + + ```bash + # Copy template to local file if not already done + cp values-secret.yaml.template ~/values-secrets.yaml + ``` + + Uncomment the `registry-user` secret and replace the placeholder with your registry token or password: + + ```yaml + - name: registry-user + vaultPrefixes: + - hub/infra/registry + fields: + - name: registry-password + value: "REPLACE_WITH_REGISTRY_TOKEN" + onMissingValue: error + ``` + + > **Note**: This secret is only required for **BYO/external registries** (Option 2). **Built-in Quay** (Option 1) uses the auto-generated `quay-users` secret. **Embedded OpenShift registry** (Option 3) does not need a manual secret when the automatic token refresher is enabled (see [Embedded OpenShift Registry](#embedded-openshift-registry)) -- the refresher creates and rotates the Vault credential automatically. 
+ > + > **Note**: Never commit `~/values-secrets.yaml` (or your local values-secret file) to git. This file contains sensitive credentials and should remain local. + +3. **Set the global registry configuration in values-hub.yaml**: Uncomment the matching `global.registry` block at the top of `values-hub.yaml`. All registry credentials are defined once here; both the `supply-chain` and `qtodo` charts inherit them automatically. + + ```yaml + # Example: BYO/External Registry (Option 2) + global: + registry: + enabled: true + domain: quay.io + repository: your-org/qtodo + user: your-username + vaultPath: "secret/data/hub/infra/registry/registry-user" + passwordVaultKey: "registry-password" + ``` + + See the **Registry Options** section at the top of `values-hub.yaml` for the full set of option blocks (built-in Quay, BYO, embedded OpenShift). + +4. **Enable supply-chain-specific overrides** (if needed): The `supply-chain` application may need additional overrides depending on the registry type. These are set in the `supply-chain` overrides section of `values-hub.yaml`: + * **Built-in Quay**: Enable `quay.enabled` (Quay user provisioner CronJob) and `registry.tlsVerify: "false"` (self-signed certs). + * **Embedded OpenShift**: Enable `registry.embeddedOpenShift.ensureImageNamespaceRBAC` (creates image namespace and push RBAC) and optionally `registry.embeddedOpenShift.tokenRefresher.enabled` (see [Embedded OpenShift Registry](#embedded-openshift-registry)). + * **BYO/External**: No extra overrides needed. + + > **Note**: The qtodo chart automatically derives its image from `global.registry.domain` and `global.registry.repository` when `global.registry.enabled=true`. No per-app image override is needed. 
+ +### Required Configuration + +These parameters are set in the `global.registry` block at the top of `values-hub.yaml`: + +| Parameter | Description | Example | +| ---------------------------------- | ------------------------------------ | ---------------------------------------------- | +| `global.registry.enabled` | Enable registry auth secret creation | `true` | +| `global.registry.domain` | Registry hostname (REQUIRED) | `quay.io`, `ghcr.io`, `registry.example.com` | +| `global.registry.repository` | Repository path (org/image) | `ztvp/qtodo`, `my-org/my-app` | +| `global.registry.user` | Registry username | `my-robot-account` | +| `global.registry.vaultPath` | Vault path for registry password | `secret/data/hub/infra/registry/registry-user` | +| `global.registry.passwordVaultKey` | Key within the Vault secret | `registry-password` | + +> **Note**: All registry types (built-in Quay, BYO, embedded OpenShift) use the same `global.registry` parameters. Both the `supply-chain` and `qtodo` charts fall back to these values when their local registry values are empty. See the Vault Paths table below for scenario-specific values. + +### Vault Paths + +Registry credentials are stored at different paths based on registry type: + +| Registry Type | Vault Path | Password Key | +| ------------- | ---------------------------------------------- | -------------------- | +| Built-in Quay | `secret/data/hub/infra/quay/quay-users` | `quay-user-password` | +| BYO Registry | `secret/data/hub/infra/registry/registry-user` | `registry-password` | +| Embedded OpenShift | `secret/data/hub/infra/registry/registry-user` | `registry-password` | + +Set `global.registry.vaultPath` and `global.registry.passwordVaultKey` in the `global.registry` block to match your scenario. When `global.registry.enabled` is false or unset (default), no registry auth secret is created (fresh install state). 
+ +The Vault policy `hub-supply-chain-jwt-secret` grants read access to both paths for the pipeline service account. For the embedded OpenShift registry, the policy also grants `create` and `update` capabilities on the registry path so the automatic token refresher can write fresh tokens back to Vault. + +### Embedded OpenShift Registry + +To use the in-cluster OpenShift image registry instead of an external registry: + +1. **Uncomment the Option 3 `global.registry` block** in `values-hub.yaml` so `global.registry` points at the embedded registry (domain, repository, vault paths). Use `user: _token` when using automatic token refresh (bearer tokens; the username is not significant to the registry). + + ```yaml + global: + registry: + enabled: true + domain: default-route-openshift-image-registry.apps.{{ .Values.global.clusterDomain }} + repository: ztvp/qtodo + user: _token + vaultPath: "secret/data/hub/infra/registry/registry-user" + passwordVaultKey: "registry-password" + ``` + +2. **Enable `registry.embeddedOpenShift.ensureImageNamespaceRBAC`** in the supply-chain overrides. The chart will automatically: + * Create the image namespace from the first component of `global.registry.repository` (e.g. `ztvp` from `ztvp/qtodo`) + * Grant the pipeline ServiceAccount `system:image-builder` in that namespace + * Enable the default route on the image registry (via a one-time Job) + +3. **Confirm the registry domain** is `default-route-openshift-image-registry.apps.<cluster-domain>` (set in `global.registry.domain` above). + +4. **Enable automatic token refresh** (recommended): Set `registry.embeddedOpenShift.tokenRefresher.enabled` to `true`. This deploys: + * A **CronJob** (`registry-token-refresher`) that runs every 6 hours. It uses a SPIFFE JWT to authenticate to Vault, creates a fresh `pipeline` ServiceAccount token via the Kubernetes TokenRequest API, and writes it to Vault. 
+ * A one-shot **Sync hook Job** (`registry-token-refresher-seed`) that seeds the initial token on first deploy so the pipeline is ready immediately. + + When the token refresher is enabled, you do **not** need to manually store a token in `~/values-secrets.yaml` for the embedded OpenShift registry. The refresher handles credential lifecycle automatically. + + If you prefer manual token management instead, disable the token refresher and store the output of `oc whoami -t` as the `registry-password` value in `~/values-secrets.yaml`. + +Example `supply-chain` application overrides for embedded OpenShift (registry host, repository, and Vault paths are normally taken from the `global.registry` block): + +```yaml +overrides: + - name: registry.embeddedOpenShift.ensureImageNamespaceRBAC + value: "true" + - name: registry.embeddedOpenShift.tokenRefresher.enabled + value: "true" +``` + +### Node-Level Image Pull Trust + +When using a registry behind the cluster ingress (Option 1: Built-in Quay or Option 3: Embedded OpenShift Registry), kubelet cannot pull images by default because the ingress certificate is self-signed and not trusted at the node level. + +The `ztvp-certificates` application handles this by patching `image.config.openshift.io/cluster` with the ingress CA certificate for the configured registry hostnames. 
Enable it by uncommenting the `imagePullTrust` overrides in `values-hub.yaml`: + +```yaml +# ztvp-certificates overrides +- name: imagePullTrust.enabled + value: "true" +- name: imagePullTrust.registries[0] + value: <registry-hostname> +``` + +Set `<registry-hostname>` to match your registry option: + +| Option | Registry Hostname | +| ----------------------- | ------------------------------------------------------------- | +| Option 1: Built-in Quay | `quay-registry-quay-quay-enterprise.apps.<cluster-domain>` | +| Option 3: Embedded OpenShift | `default-route-openshift-image-registry.apps.<cluster-domain>` | + +> **Note**: Option 2 (BYO/External Registry) does not require `imagePullTrust` because external registries like quay.io and ghcr.io use publicly trusted certificates. + +### ArgoCD PVC Health Check + +The supply-chain chart creates a `PersistentVolumeClaim` (`qtodo-workspace-source`) for the pipeline workspace. Depending on the storage class, this PVC may remain in `Pending` state until a pod is scheduled -- which is expected behavior, but ArgoCD reports it as `Progressing`, preventing the application from reaching `Healthy` status. + +A custom `resourceHealthChecks` entry in `values-hub.yaml` teaches ArgoCD to treat `Pending` PVCs as `Healthy`: + +```yaml +resourceHealthChecks: + - kind: PersistentVolumeClaim + check: | + hs = {} + if obj.status ~= nil and obj.status.phase ~= nil then + if obj.status.phase == "Bound" then + hs.status = "Healthy" + hs.message = "PVC is bound" + elseif obj.status.phase == "Pending" then + hs.status = "Healthy" + hs.message = "PVC is pending" + else + hs.status = "Progressing" + hs.message = "Waiting for PVC" + end + else + hs.status = "Progressing" + hs.message = "Waiting for PVC status" + end + return hs +``` + +## Pipeline + +To build and certify the application, we will use _Red Hat OpenShift Pipelines_. 
+ +ZTVP creates a `Pipeline` in our cluster called **qtodo-supply-chain** that orchestrates the various tasks necessary to build the application from its source code, generate a container image, and publish the resulting image to the defined OCI registry. Within the pipeline, an SBOM containing the build's contents will be generated, binaries and the build attestation will be signed, and the validity of those signatures will be verified. ### How to run the pipeline +Once the supply-chain application has synced in ArgoCD, start the pipeline using one of the methods below. + #### Using OpenShift Web Console 1. Launch the OpenShift Web console. @@ -35,12 +214,11 @@ ZTVP will create a `Pipeline` in our cluster called **qtodo-supply-chain** that 3. Locate the **qtodo-supply-chain** pipeline. It's within the **layered-zero-trust-hub** project. 4. In the kebab menu (three vertical dots) from the right-hand, select **Start**. - Review the configurable parameters. Most parameters should be correct with their default values if we are in single-cluster mode. But, double-check their values just in case. - - At the bottom we have the **workspaces**. These must be configured manually. + Review the configurable parameters. Most parameters should be correct with their default values if we are in single-cluster mode. But, double-check their values just in case. - * For **qtodo-source**, select `PersistentVolumeClaim` and the PVC name is `qtodo-workspace-source`. - * For **registry-auth-config**, select `Secret` and the name of the secret is `qtodo-registry-auth`. + At the bottom we have the **workspaces**. These must be configured manually. + * For **qtodo-source**, select `PersistentVolumeClaim` and the PVC name is `qtodo-workspace-source`. + * For **registry-auth-config**, select `Secret` and the name of the secret is `qtodo-registry-auth`. 5. Press **Start** to finish and run the pipeline. 
@@ -62,12 +240,12 @@ spec: timeouts: pipeline: 1h0m0s workspaces: - - name: qtodo-source - persistentVolumeClaim: - claimName: qtodo-workspace-source - - name: registry-auth-config - secret: - secretName: qtodo-registry-auth + - name: qtodo-source + persistentVolumeClaim: + claimName: qtodo-workspace-source + - name: registry-auth-config + secret: + secretName: qtodo-registry-auth ``` As was described previously, verify the values associated with the PVC storage and registry configuration. @@ -84,6 +262,19 @@ You can review the current pipeline logs using the [Tekton CLI](https://tekton.d tkn pipeline logs -n layered-zero-trust-hub -L -f ``` +Or use `oc` commands to monitor progress: + +```shell +# List pipeline runs +oc get pipelinerun -n layered-zero-trust-hub + +# Check task status for a specific run +oc get taskruns -n layered-zero-trust-hub -l tekton.dev/pipelineRun= + +# View logs for a specific task +oc logs -n layered-zero-trust-hub -l tekton.dev/pipelineRun=,tekton.dev/pipelineTask= +``` + ### Pipeline tasks The pipeline we have prepared has the following steps: @@ -96,9 +287,13 @@ The pipeline we have prepared has the following steps: * **qtodo-sign-image**. Signs the container image. * **qtodo-generate-sbom**. Generates an SBOM from the image. * **qtodo-sbom-attestation**. Creates a (signed) attestation, and attaches it to the image. -* **qtodo-upload-sbom**. Uploads the generated SBOM file to RHTPA. +* **qtodo-upload-sbom**. Uploads the generated SBOM file to RHTPA. * **qtodo-verify-image**. Verifies the attestation and the signature attached to the image. +**Finally task:** + +* **restart-qtodo**. Runs after all tasks complete. If `qtodo-verify-image` succeeded and the `qtodo` Deployment exists, it restarts the Deployment (`oc rollout restart`) so the application picks up the newly built and signed image. If the Deployment is not yet present (e.g., the pipeline ran before the qtodo application was deployed), the task exits gracefully. 
+ ### Inspecting the results #### Openshift Web UI @@ -191,18 +386,18 @@ The credentials to access the Quay web interface can be obtained as follows: * Quay URL - ```shell - echo "https://$(oc get route -n quay-enterprise \ - -l quay-component=quay-app-route \ - -o jsonpath='{.items[0].spec.host}')" - ``` + ```shell + echo "https://$(oc get route -n quay-enterprise \ + -l quay-component=quay-app-route \ + -o jsonpath='{.items[0].spec.host}')" + ``` * Quay username: The same one you specified in `values-hub.yaml` or **quay-user**. * Quay password: - ```shell - oc get secret -n layered-zero-trust-hub qtodo-quay-password -o json | jq '.data["password"] | @base64d' - ``` + ```shell + oc get secret -n layered-zero-trust-hub qtodo-quay-password -o json | jq '.data["password"] | @base64d' + ``` Now that we have the credentials, we can check the content in Quay. @@ -231,19 +426,19 @@ The RHTPA web UI uses OIDC for user authentication. If you are using the **Keycl * RHTPA URL - ```shell - echo "https://$(oc get route -n trusted-profile-analyzer \ - -l app.kubernetes.io/name=server \ - -o jsonpath='{.items[0].spec.host}')" - ``` + ```shell + echo "https://$(oc get route -n trusted-profile-analyzer \ + -l app.kubernetes.io/name=server \ + -o jsonpath='{.items[0].spec.host}')" + ``` * RHTPA user: **rhtpa-user** * RHTPA user password - ```shell - oc get secret keycloak-users -n keycloak-system -o json \ - | jq '.data["rhtpa-user-password"] | @base64d' - ``` + ```shell + oc get secret keycloak-users -n keycloak-system -o json \ + | jq '.data["rhtpa-user-password"] | @base64d' + ``` To review our SBOM within the RHTPA web UI: diff --git a/scripts/gen-byo-container-registry-variants.py b/scripts/gen-byo-container-registry-variants.py new file mode 100755 index 00000000..4a586f90 --- /dev/null +++ b/scripts/gen-byo-container-registry-variants.py @@ -0,0 +1,534 @@ +#!/usr/bin/env python3 +"""Generate values-hub.yaml variants for BYO container registry options. 
+ +Reads the default values-hub.yaml (all supply-chain components commented out) +and produces up to 3 variants with the chosen registry option enabled: + + Option 1: Built-in Quay Registry + Option 2: BYO / External Registry (e.g. quay.io, ghcr.io) + Option 3: Embedded OpenShift Image Registry + +Each variant also enables the common supply-chain stack (OpenShift Pipelines, +ODF, NooBaa, RHTAS, RHTPA, and their namespaces/subscriptions/vault roles). + +Registry credentials are centralized in a single `global.registry` block at +the top of values-hub.yaml. Both the supply-chain and qtodo charts fall back +to `global.registry.*` when their local registry values are empty. + +Usage: + # Generate all 3 variants under /tmp + python3 scripts/gen-byo-container-registry-variants.py + + # Generate a single variant + python3 scripts/gen-byo-container-registry-variants.py --option 2 + + # Custom base file and output directory + python3 scripts/gen-byo-container-registry-variants.py \\ + --base my-values-hub.yaml --outdir /tmp/variants +""" + +import argparse +import os +import re +import sys + + +def uncomment_line(line): + """Remove one layer of comment: ' # foo' -> ' foo'.""" + return re.sub(r"^(\s*)# ?", r"\1", line, count=1) + + +def uncomment_lines_matching(lines, patterns): + """Uncomment individual lines matching any of the given patterns.""" + result = [] + for line in lines: + matched = False + for pat in patterns: + if re.search(pat, line): + result.append(uncomment_line(line)) + matched = True + break + if not matched: + result.append(line) + return result + + +def _uncomment_multiline_block(lines, trigger_re, body_re): + """Uncomment a contiguous block: first line matches *trigger_re*, + subsequent lines match *body_re*. 
Both the trigger and body + lines are uncommented.""" + new = [] + i = 0 + while i < len(lines): + if re.search(trigger_re, lines[i]): + while i < len(lines) and re.search(body_re, lines[i]): + new.append(uncomment_line(lines[i])) + i += 1 + continue + new.append(lines[i]) + i += 1 + return new + + +def _uncomment_until_sentinel(lines, trigger_re, sentinel_re, prev_re=None): + """Uncomment from trigger line until a sentinel (exclusive).""" + new = [] + i = 0 + while i < len(lines): + prev_ok = prev_re is None or (i > 0 and re.search(prev_re, lines[i - 1])) + if re.search(trigger_re, lines[i]) and prev_ok: + while i < len(lines): + if re.match(r"^\s*$", lines[i]): + break + if re.match(r"^\s{4}\w", lines[i]): + break + if re.search(sentinel_re, lines[i]): + break + new.append(uncomment_line(lines[i])) + i += 1 + continue + new.append(lines[i]) + i += 1 + return new + + +# --------------------------------------------------------------------------- +# Global registry block +# --------------------------------------------------------------------------- +def enable_global_registry(lines, option_num): + """Uncomment the global.registry block for the selected option. + + The base file contains three commented blocks: + # OPTION 1: Built-in Quay Registry + # global: + # registry: + # ... + # OPTION 2: ... + # global: + # registry: + # ... + # OPTION 3: ... + # global: + # registry: + # ... + + This function uncomments only the block matching option_num. 
+ """ + target_header = f"# OPTION {option_num}:" + result = [] + i = 0 + while i < len(lines): + line = lines[i] + + if re.search(re.escape(target_header), line): + result.append(line) + i += 1 + while i < len(lines): + if re.match(r"^# OPTION \d+:", lines[i]): + break + if re.match(r"^$", lines[i]): + break + if re.match(r"^[^#]", lines[i]): + break + result.append(uncomment_line(lines[i])) + i += 1 + continue + + result.append(line) + i += 1 + return result + + +# --------------------------------------------------------------------------- +# Supply-chain app enabler +# --------------------------------------------------------------------------- +def enable_supply_chain_app(lines, option_num): + """Enable the supply-chain app and its option-specific overrides. + + Pass 1: strip one comment layer from all supply-chain block lines. + Pass 2: selectively uncomment option-specific and common overrides. + """ + # --- Pass 1: strip outer comment from all supply-chain lines ---------- + pass1 = [] + in_block = False + block_start = -1 + block_end = -1 + + for idx, line in enumerate(lines): + if re.search(r"# Secure Supply Chain - Uncomment to enable", line): + in_block = True + block_start = idx + 1 + pass1.append(line) + continue + if in_block and re.match(r"^\s{4}#\s*$", line): + in_block = False + block_end = idx + pass1.append(line) + continue + if in_block: + pass1.append(uncomment_line(line)) + else: + pass1.append(line) + + if block_start < 0: + return pass1 + + # --- Pass 2: selectively uncomment option overrides ------------------- + final = [] + for idx, line in enumerate(pass1): + if not (block_start <= idx < block_end): + final.append(line) + continue + + stripped = line.lstrip() + if not stripped.startswith("#"): + final.append(line) + continue + + # Always uncomment RHTAS and RHTPA flags + if re.search(r"# - name: rhtas\.enabled", line) or re.search( + r"# - name: rhtpa\.enabled", line + ): + final.append(uncomment_line(line)) + continue + if 
re.search(r"#\s+value:", line) and final: + prev = final[-1] + if "rhtas.enabled" in prev or "rhtpa.enabled" in prev: + final.append(uncomment_line(line)) + continue + + # Option 1 (Built-in Quay): uncomment quay.enabled and tlsVerify + if option_num == 1: + if re.search(r"# - name: quay\.enabled", line) or re.search( + r"# - name: registry\.tlsVerify", line + ): + final.append(uncomment_line(line)) + continue + if re.search(r"#\s+value:", line) and final: + prev = final[-1] + if "quay.enabled" in prev or "registry.tlsVerify" in prev: + final.append(uncomment_line(line)) + continue + + # Option 3 (Embedded OpenShift): uncomment ensureImageNamespaceRBAC + if option_num == 3: + if re.search(r"# - name: registry\.embeddedOpenShift", line): + final.append(uncomment_line(line)) + continue + if re.search(r"#\s+value:", line) and final: + prev = final[-1] + if "embeddedOpenShift" in prev: + final.append(uncomment_line(line)) + continue + + final.append(line) + + return final + + +# --------------------------------------------------------------------------- +# Common supply-chain components (shared by all 3 options) +# --------------------------------------------------------------------------- +def apply_common_supply_chain(lines): + """Uncomment all components common to every supply-chain option.""" + + # Namespace: openshift-pipelines + lines = uncomment_lines_matching(lines, [r"^\s*# - openshift-pipelines\s*$"]) + + # Namespace: openshift-storage + lines = _uncomment_multiline_block( + lines, + r"# - openshift-storage:", + r"#\s+(- openshift-storage:|operatorGroup:|targetNamespace:" + r"|annotations:|labels:" + r"|openshift\.io/cluster-monitoring" + r"|argocd\.argoproj\.io/sync-wave.*26)", + ) + + # Namespace: trusted-artifact-signer + lines = _uncomment_multiline_block( + lines, + r"# - trusted-artifact-signer:", + r"#\s+(- trusted-artifact-signer:" + r"|annotations:|labels:" + r"|argocd\.argoproj\.io/sync-wave.*32.*Auto-created" + 
r"|openshift\.io/cluster-monitoring)", + ) + + # Namespace: rhtpa-operator + lines = _uncomment_multiline_block( + lines, + r"# - rhtpa-operator:", + r"#\s+(- rhtpa-operator:|operatorGroup:" + r"|targetNamespace: rhtpa" + r"|annotations:" + r"|argocd\.argoproj\.io/sync-wave.*26.*Create before operator)", + ) + + # Namespace: trusted-profile-analyzer + lines = _uncomment_multiline_block( + lines, + r"# - trusted-profile-analyzer:", + r"#\s+(- trusted-profile-analyzer:" + r"|annotations:|labels:" + r"|argocd\.argoproj\.io/sync-wave.*32.*Create before RHTPA" + r"|openshift\.io/cluster-monitoring)", + ) + + # Subscription: openshift-pipelines + new = [] + i = 0 + while i < len(lines): + prev = lines[i - 1] if i > 0 else "" + if re.search(r"# openshift-pipelines:", lines[i]) and re.search( + r"Uncomment to enable OpenShift Pipelines", prev + ): + while i < len(lines) and re.search( + r"#\s*(openshift-pipelines:" + r"|name: openshift-pipelines" + r"|namespace: openshift-operators)", + lines[i], + ): + new.append(uncomment_line(lines[i])) + i += 1 + continue + new.append(lines[i]) + i += 1 + lines = new + + # Subscription: odf + lines = _uncomment_multiline_block( + lines, + r"# odf:", + r"#\s*(odf:|name: odf-operator|namespace: openshift-storage" + r"|channel: stable-4" + r"|annotations:" + r"|argocd\.argoproj\.io/sync-wave.*27.*Install after OperatorGroup)", + ) + + # Subscription: rhtas-operator + lines = _uncomment_multiline_block( + lines, + r"# rhtas-operator:", + r"#\s*(rhtas-operator:|name: rhtas-operator" + r"|namespace: openshift-operators|channel: stable-v1\.3" + r"|annotations:" + r"|argocd\.argoproj\.io/sync-wave.*29" + r"|catalogSource: redhat-operators)", + ) + + # Subscription: rhtpa-operator + new = [] + i = 0 + while i < len(lines): + prev2 = lines[i - 2] if i > 1 else "" + if re.search(r"# rhtpa-operator:", lines[i]) and re.search(r"Channel:", prev2): + while i < len(lines) and re.search( + r"#\s*(rhtpa-operator:|name: rhtpa-operator" + r"|namespace: 
rhtpa-operator" + r"|channel: stable-v1\.1" + r"|catalogSource: redhat-operators" + r"|annotations:" + r"|argocd\.argoproj\.io/sync-wave.*27" + r".*Install after OperatorGroup.*before applications)", + lines[i], + ): + new.append(uncomment_line(lines[i])) + i += 1 + continue + new.append(lines[i]) + i += 1 + lines = new + + # Vault JWT roles: rhtpa and supply-chain + lines = uncomment_lines_matching( + lines, + [ + r"#\s+- name: rhtpa\s*$", + r"#\s+audience: rhtpa", + r"#\s+subject: spiffe://.*ns/trusted-profile-analyzer", + r"#\s+policies:\s*$", + r"#\s+- hub-infra-rhtpa-jwt-secret", + r"#\s+- name: supply-chain\s*$", + r"#\s+audience: supply-chain", + r"#\s+subject: spiffe://.*sa/pipeline", + r"#\s+- hub-supply-chain-jwt-secret", + ], + ) + + # Application: noobaa-mcg + lines = _uncomment_multiline_block( + lines, + r"# noobaa-mcg:", + r"#\s*(noobaa-mcg:|name: noobaa-mcg|namespace: openshift-storage" + r"|project: hub|path: charts/noobaa-mcg|annotations:" + r"|argocd\.argoproj\.io/sync-wave.*36)", + ) + + # Application: trusted-artifact-signer + lines = _uncomment_until_sentinel( + lines, + r"# trusted-artifact-signer:", + r"# RHTPA \(Red Hat", + prev_re=r"Depends on:", + ) + + # Application: trusted-profile-analyzer + lines = _uncomment_until_sentinel( + lines, + r"# trusted-profile-analyzer:", + r"PLACEHOLDER_NEVER_MATCH", + prev_re=r"Depends on:", + ) + + return lines + + +# --------------------------------------------------------------------------- +# Per-option enablers +# --------------------------------------------------------------------------- +def enable_quay_namespace_and_sub(lines): + """Enable quay-enterprise namespace, quay-operator sub, quay-registry app.""" + + lines = _uncomment_multiline_block( + lines, + r"# - quay-enterprise:", + r"#\s+(- quay-enterprise:" + r"|annotations:|labels:" + r"|argocd\.argoproj\.io/sync-wave.*32.*Create before" + r"|openshift\.io/cluster-monitoring)", + ) + + lines = _uncomment_multiline_block( + lines, + r"# 
quay-operator:", + r"#\s*(quay-operator:|name: quay-operator" + r"|namespace: openshift-operators|channel: stable-3" + r"|annotations:" + r"|argocd\.argoproj\.io/sync-wave.*28)", + ) + + lines = _uncomment_multiline_block( + lines, + r"# quay-registry:", + r"#\s*(quay-registry:|name: quay-registry" + r"|namespace: quay-enterprise|project: hub" + r"|chart: quay|chartVersion: 0\.1|annotations:" + r"|argocd\.argoproj\.io/sync-wave.*41)", + ) + + return lines + + +def enable_image_pull_trust(lines, hostname): + """Enable imagePullTrust in ztvp-certificates overrides.""" + result = [] + for line in lines: + if re.search(r"# - name: imagePullTrust\.enabled", line): + result.append(uncomment_line(line)) + elif ( + re.search(r'#\s+value: "true"\s*$', line) + and result + and "imagePullTrust.enabled" in result[-1] + ): + result.append(uncomment_line(line)) + elif re.search(r"# - name: imagePullTrust\.registries\[0\]", line): + result.append(uncomment_line(line)) + elif ( + re.search(r"#\s+value:", line) + and result + and "imagePullTrust.registries" in result[-1] + ): + result.append(re.sub(r"#\s+value:.*", f" value: {hostname}", line)) + else: + result.append(line) + return result + + +# --------------------------------------------------------------------------- +# Top-level generator +# --------------------------------------------------------------------------- +OPTION_LABELS = { + 1: "built-in-quay-registry", + 2: "byo-external-registry", + 3: "embedded-openshift-registry", +} + + +def generate_variant(base_path, option_num, output_path): + with open(base_path) as fh: + lines = fh.readlines() + + lines = apply_common_supply_chain(lines) + lines = enable_global_registry(lines, option_num) + lines = enable_supply_chain_app(lines, option_num) + + if option_num == 1: + lines = enable_quay_namespace_and_sub(lines) + lines = enable_image_pull_trust( + lines, + "quay-registry-quay-quay-enterprise.apps." 
+ "{{ $.Values.global.clusterDomain }}", + ) + + if option_num == 3: + lines = enable_image_pull_trust( + lines, + "default-route-openshift-image-registry.apps." + "{{ $.Values.global.clusterDomain }}", + ) + + with open(output_path, "w") as fh: + fh.writelines(lines) + + label = OPTION_LABELS.get(option_num, f"option-{option_num}") + print(f" Option {option_num} ({label}) -> {output_path}") + + +def main(): + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "--base", + default=None, + help="Base values-hub.yaml to read (default: /values-hub.yaml)", + ) + parser.add_argument( + "--outdir", + default=None, + help="Output directory (default: /tmp)", + ) + parser.add_argument( + "--option", + type=int, + choices=[1, 2, 3], + default=None, + help="Generate only this option (default: all 3)", + ) + args = parser.parse_args() + + repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + base = args.base or os.path.join(repo_root, "values-hub.yaml") + outdir = args.outdir or "/tmp" + + if not os.path.isfile(base): + print(f"ERROR: base file not found: {base}", file=sys.stderr) + sys.exit(1) + + os.makedirs(outdir, exist_ok=True) + + options = [args.option] if args.option else [1, 2, 3] + print(f"Base: {base}") + print(f"Output directory: {outdir}") + for opt in options: + label = OPTION_LABELS[opt] + out = os.path.join(outdir, f"values-hub-{label}.yaml") + generate_variant(base, opt, out) + + print("Done.") + + +if __name__ == "__main__": + main() diff --git a/values-coco-dev.yaml b/values-coco-dev.yaml index 70c956f0..614f7083 100644 --- a/values-coco-dev.yaml +++ b/values-coco-dev.yaml @@ -164,7 +164,7 @@ clusterGroup: # rhtas-operator: # name: rhtas-operator # namespace: openshift-operators - # channel: stable + # channel: stable-v1.3 # annotations: # argocd.argoproj.io/sync-wave: "29" # Install after Quay operator, before applications # catalogSource: 
redhat-operators diff --git a/values-global.yaml b/values-global.yaml index c050c5fb..741e09ac 100644 --- a/values-global.yaml +++ b/values-global.yaml @@ -9,4 +9,4 @@ main: clusterGroupName: hub multiSourceConfig: enabled: true - clusterGroupChartVersion: "0.9.*" + clusterGroupChartVersion: "0.9.47" diff --git a/values-hub.yaml b/values-hub.yaml index e2c5304d..dae08bf8 100644 --- a/values-hub.yaml +++ b/values-hub.yaml @@ -11,6 +11,42 @@ spire: route.openshift.io/termination: reencrypt route.openshift.io/destination-ca-certificate-secret: spire-bundle +# =========================================================================== +# Global registry configuration (shared by qtodo and supply-chain) +# Define once here; both charts fall back to global.registry.* when their +# local registry values are empty. Per-app overrides still take precedence. +# +# The gen-byo-container-registry-variants.py script uncomments the matching +# OPTION block for each variant. +# =========================================================================== +# OPTION 1: Built-in Quay Registry +# global: +# registry: +# enabled: true +# domain: quay-registry-quay-quay-enterprise.apps.{{ .Values.global.clusterDomain }} +# repository: ztvp/qtodo +# user: quay-user +# vaultPath: "secret/data/hub/infra/quay/quay-users" +# passwordVaultKey: "quay-user-password" +# OPTION 2: BYO/External Registry (quay.io, ghcr.io, etc.) 
+# global: +# registry: +# enabled: true +# domain: quay.io +# repository: your-org/qtodo +# user: your-username +# vaultPath: "secret/data/hub/infra/registry/registry-user" +# passwordVaultKey: "registry-password" +# OPTION 3: Embedded OpenShift Registry +# global: +# registry: +# enabled: true +# domain: default-route-openshift-image-registry.apps.{{ .Values.global.clusterDomain }} +# repository: ztvp/qtodo +# user: _token +# vaultPath: "secret/data/hub/infra/registry/registry-user" +# passwordVaultKey: "registry-password" + clusterGroup: name: hub isHubCluster: true @@ -133,7 +169,7 @@ clusterGroup: # rhtas-operator: # name: rhtas-operator # namespace: openshift-operators - # channel: stable + # channel: stable-v1.3 # annotations: # argocd.argoproj.io/sync-wave: "29" # Install after Quay operator, before applications # catalogSource: redhat-operators @@ -224,6 +260,15 @@ clusterGroup: - name: rollout.strategy value: labeled + # Node-level image pull trust for kubelet + # Required when pulling images from registries behind the cluster ingress + # (e.g. built-in Quay, embedded OpenShift registry). Patches image.config.openshift.io/cluster. + # Uncomment and set the registry hostname when enabling Option 1 (Quay) or Option 3 (embedded OpenShift). + # - name: imagePullTrust.enabled + # value: "true" + # - name: imagePullTrust.registries[0] + # value: default-route-openshift-image-registry.apps.{{ $.Values.global.clusterDomain }} + # Note: additionalCertificates (complex nested array) temporarily disabled # Need to find proper way to pass complex structures in Validated Patterns acm: @@ -268,7 +313,7 @@ clusterGroup: # via rawResultStorage in scan-setting.yaml. The explicit PVC causes # ArgoCD 'Progressing' status on storage with WaitForFirstConsumer mode. 
- name: compliance.storage.enabled - value: false + value: "false" vault: name: vault namespace: vault @@ -308,6 +353,9 @@ clusterGroup: path "secret/data/hub/infra/quay/*" { capabilities = ["read"] } + path "secret/data/hub/infra/registry/*" { + capabilities = ["read", "create", "update"] + } path "secret/data/hub/infra/rhtpa/rhtpa-oidc-cli" { capabilities = ["read"] } @@ -328,10 +376,10 @@ clusterGroup: # subject: spiffe://apps.{{ $.Values.global.clusterDomain }}/ns/trusted-profile-analyzer/sa/rhtpa # policies: # - hub-infra-rhtpa-jwt-secret - # Supply chain vault role (for Tekton pipelines) + # Supply chain vault role (for Tekton pipelines; enable with supply-chain app / Option 3 or BYO registry) # - name: supply-chain # audience: supply-chain - # subject: spiffe://apps.{{ $.Values.global.clusterDomain }}/ns/pipeline/sa/pipeline + # subject: spiffe://apps.{{ $.Values.global.clusterDomain }}/ns/{{ $.Values.global.pattern }}-hub/sa/pipeline # policies: # - hub-supply-chain-jwt-secret # Shared Object Storage Backend @@ -482,28 +530,23 @@ clusterGroup: - .imagePullSecrets[]|select(.name | contains("-dockercfg-")) overrides: - name: app.oidc.enabled - value: true + value: "true" - name: app.spire.enabled - value: true + value: "true" - name: app.vault.url value: https://vault.vault.svc.cluster.local:8200 - name: app.vault.role value: qtodo - name: app.vault.secretPath value: secret/data/apps/qtodo/qtodo-db - # For Secure Supply Chain, we changed the qtodo image to use the one built in the secure supply chain - # - name: app.images.main.name - # value: quay-registry-quay-quay-enterprise.apps.{{ $.Values.global.clusterDomain }}/ztvp/qtodo - # - name: app.images.main.version - # value: latest - # Uncomment to enable registry authentication - # - name: app.images.main.registry.auth - # value: true - # - name: app.images.main.registry.user - # value: quay-user - # - name: app.images.main.registry.passwordVaultKey - # value: quay-user-password - # Secure Supply Chain - 
Uncomment to enable + # Secure Supply Chain: when global.registry.enabled=true the chart + # automatically derives the image from global.registry.domain/repository. + # No override needed here. + # Uncomment to seed the registry with the upstream qtodo image + # before the supply-chain pipeline runs (avoids ImagePullBackOff on first install) + # - name: app.seedImage.enabled + # value: "true" + # Secure Supply Chain - Uncomment to enable (required for Option 1, 2, or 3 registry flows in docs) # supply-chain: # name: supply-chain # project: hub @@ -515,17 +558,29 @@ clusterGroup: # jqPathExpressions: # - .imagePullSecrets[]|select(.name | contains("-dockercfg-")) # overrides: - # # Don't forget to uncomment the RHTAS and RHTPA components in this same file - # - name: rhtas.enabled - # value: true - # - name: rhtpa.enabled - # value: true - # - name: registry.tlsVerify - # value: "false" - # - name: registry.user - # value: quay-admin - # - name: registry.passwordVaultKey - # value: quay-admin-password + # # Registry credentials are inherited from global.registry. + # # Only set app-specific overrides below. 
+ # # Built-in Quay: uncomment to enable the Quay user provisioner CronJob + # # - name: quay.enabled + # # value: "true" + # # Built-in Quay: self-signed certs require TLS verify off + # # - name: registry.tlsVerify + # # value: "false" + # # Embedded OpenShift (Option 3): create image namespace and grant push RBAC + # # - name: registry.embeddedOpenShift.ensureImageNamespaceRBAC + # # value: "true" + # # Embedded OpenShift (Option 3): periodically refresh pipeline SA token in Vault + # # - name: registry.embeddedOpenShift.tokenRefresher.enabled + # # value: "true" + # # Enable RHTAS signing + # # - name: rhtas.enabled + # # value: "true" + # # Enable RHTPA SBOM upload + # # - name: rhtpa.enabled + # # value: "true" + # # Uncomment to auto-trigger a pipeline run on every ArgoCD sync + # # - name: pipelinerun.enabled + # # value: "true" # # ACS Central Services acs-central: @@ -585,6 +640,28 @@ clusterGroup: argocd.argoproj.io/sync-wave: "51" argoCD: resourceHealthChecks: + - check: | + local hs = {} + if obj.status ~= nil and obj.status.phase ~= nil then + if obj.status.phase == "Bound" then + hs.status = "Healthy" + hs.message = "PVC is bound" + elseif obj.status.phase == "Pending" then + hs.status = "Healthy" + hs.message = "PVC is pending" + elseif obj.status.phase == "Lost" then + hs.status = "Degraded" + hs.message = "PVC is lost" + else + hs.status = "Progressing" + hs.message = obj.status.phase + end + else + hs.status = "Progressing" + hs.message = "Waiting for PVC status" + end + return hs + kind: PersistentVolumeClaim - check: | local hs = {} hs.status = "Progressing" diff --git a/values-secret.yaml.template b/values-secret.yaml.template index 9185fc4f..945279fd 100644 --- a/values-secret.yaml.template +++ b/values-secret.yaml.template @@ -16,7 +16,8 @@ version: "2.0" # Infrastructure Secrets (hub/infra/*): # hub/infra/keycloak/ - Keycloak infrastructure secrets # hub/infra/rhtpa/ - RHTPA infrastructure secrets -# hub/infra/quay/ - Quay registry 
credentials +# hub/infra/quay/ - Built-in Quay registry credentials (auto-generated) +# hub/infra/registry/ - BYO container registry credentials (user-provided) # hub/infra/users/ - User credentials managed by IdP # # Framework Secrets: @@ -174,33 +175,37 @@ secrets: vaultPolicy: alphaNumericPolicy # =========================================================================== - # QUAY INFRASTRUCTURE SECRETS (hub/infra/quay/) - # Registry credentials for Quay - # Policy: hub-infra-quay-secret (read access to hub/infra/quay/*) + # BUILT-IN QUAY REGISTRY SECRETS (hub/infra/quay/) + # Auto-generated credentials for built-in Quay registry + # Used by: Quay user provisioner job, supply-chain pipeline (when quay.enabled=true) + # Policy: hub-supply-chain-jwt-secret (read access to hub/infra/quay/*) # =========================================================================== - name: quay-users vaultPrefixes: - hub/infra/quay fields: - - name: quay-admin-password - onMissingValue: generate - vaultPolicy: validatedPatternDefaultPolicy - name: quay-user-password onMissingValue: generate vaultPolicy: validatedPatternDefaultPolicy - # External Registry Credentials (e.g., Quay.io, Docker Hub, GHCR) - # Reserved for future use with container signing workflows - # Uncomment and provide your credentials when needed - #- name: external-registry + # =========================================================================== + # BYO REGISTRY SECRETS (hub/infra/registry/) + # Only needed for Option 2 (BYO/external registry, e.g. quay.io, ghcr.io). + # NOT needed for Option 1 (built-in Quay uses quay-users secret) or + # Option 3 (embedded OpenShift registry with token refresher writes to Vault + # automatically -- see docs/supply-chain.md). 
+ # Used by: supply-chain pipeline (push), qtodo (pull) when registry enabled + # Policy: hub-supply-chain-jwt-secret (read access to hub/infra/registry/*) + # + # Uncomment and replace REPLACE_WITH_REGISTRY_TOKEN with your registry + # token/password in your local ~/values-secret-layered-zero-trust.yaml. + # =========================================================================== + #- name: registry-user # vaultPrefixes: - # - hub/infra + # - hub/infra/registry # fields: - # - name: username - # value: "your-registry-username" # Replace with your username - # onMissingValue: error - # - name: password - # value: "your-registry-token" # Replace with your token/password + # - name: registry-password + # value: "REPLACE_WITH_REGISTRY_TOKEN" # onMissingValue: error # ===========================================================================