diff --git a/.gitignore b/.gitignore index 4bae3c6a..06e28cf1 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,10 @@ super-linter-output # GitHub Actions leftovers github_conf +# Python bytecode cache +__pycache__/ +*.pyc + # Editor and IDE specific files .cursorrules .cursor/ diff --git a/scripts/features/features.yaml b/scripts/features/features.yaml new file mode 100644 index 00000000..192b03ff --- /dev/null +++ b/scripts/features/features.yaml @@ -0,0 +1,43 @@ +# Feature registry for gen-feature-variants.py +# Each feature maps to a YAML fragment file in this directory. +# Dependencies are resolved automatically (topological order). +features: + storage: + description: "ODF object storage + NooBaa MCG (S3 backend)" + depends_on: [] + + quay: + description: "Red Hat Quay container registry" + depends_on: [storage] + + rhtas: + description: "Red Hat Trusted Artifact Signer (SPIFFE + Email)" + depends_on: [] + + rhtpa: + description: "Red Hat Trusted Profile Analyzer" + depends_on: [storage] + + pipelines: + description: "OpenShift Pipelines" + depends_on: [] + + supply-chain: + description: "Full secure supply chain pipeline" + depends_on: [pipelines, rhtas, rhtpa, storage] + registry_option_required: true + org: ztvp + image_name: qtodo + +# Registry options (only used with supply-chain feature) +# Each maps to a file under registry/ subdirectory. 
+registry_options: + 1: + label: "built-in-quay-registry" + file: "registry/option-1-quay.yaml" + 2: + label: "byo-external-registry" + file: "registry/option-2-byo.yaml" + 3: + label: "embedded-openshift-registry" + file: "registry/option-3-embedded-openshift.yaml" diff --git a/scripts/features/pipelines.yaml b/scripts/features/pipelines.yaml new file mode 100644 index 00000000..fffc2c01 --- /dev/null +++ b/scripts/features/pipelines.yaml @@ -0,0 +1,10 @@ +# OpenShift Pipelines (Tekton) +# Required for the secure supply chain pipeline flow +clusterGroup: + namespaces: + - openshift-pipelines + + subscriptions: + openshift-pipelines: + name: openshift-pipelines-operator-rh + namespace: openshift-operators diff --git a/scripts/features/quay.yaml b/scripts/features/quay.yaml new file mode 100644 index 00000000..bdb44f15 --- /dev/null +++ b/scripts/features/quay.yaml @@ -0,0 +1,27 @@ +# Red Hat Quay container registry +# Depends on: storage (ODF + NooBaa MCG for backend) +clusterGroup: + namespaces: + - quay-enterprise: + annotations: + argocd.argoproj.io/sync-wave: "32" + labels: + openshift.io/cluster-monitoring: "true" + + subscriptions: + quay-operator: + name: quay-operator + namespace: openshift-operators + channel: stable-3.15 + annotations: + argocd.argoproj.io/sync-wave: "28" + + applications: + quay-registry: + name: quay-registry + namespace: quay-enterprise + project: hub + chart: quay + chartVersion: 0.1.* + annotations: + argocd.argoproj.io/sync-wave: "41" diff --git a/scripts/features/registry/option-1-quay.yaml b/scripts/features/registry/option-1-quay.yaml new file mode 100644 index 00000000..ef499c06 --- /dev/null +++ b/scripts/features/registry/option-1-quay.yaml @@ -0,0 +1,61 @@ +# OPTION 1: Built-in Quay Registry +# Enables global.registry pointing to the pattern's own Quay instance. +# Includes Quay namespace, subscription, and application (only needed for option 1). +# Adds quay.enabled and registry.tlsVerify overrides to supply-chain app. 
+# Adds imagePullTrust to ztvp-certificates for node-level kubelet trust. +global: + registry: + enabled: true + domain: "quay-registry-quay-quay-enterprise.apps.{{ .Values.global.clusterDomain }}" + # Placeholders auto-replaced by the generator (supply-chain defines org=ztvp, image_name=qtodo) + repository: org/image-name + user: quay-user + vaultPath: "secret/data/hub/infra/quay/quay-users" + passwordVaultKey: "quay-user-password" + +clusterGroup: + namespaces: + - quay-enterprise: + annotations: + argocd.argoproj.io/sync-wave: "32" + labels: + openshift.io/cluster-monitoring: "true" + + subscriptions: + quay-operator: + name: quay-operator + namespace: openshift-operators + channel: stable-3.15 + annotations: + argocd.argoproj.io/sync-wave: "28" + + applications: + quay-registry: + name: quay-registry + namespace: quay-enterprise + project: hub + chart: quay + chartVersion: 0.1.* + annotations: + argocd.argoproj.io/sync-wave: "41" + overrides: + - name: job.image + value: "registry.redhat.io/openshift4/ose-cli:latest" + + merge_into_applications: + supply-chain: + overrides: + - name: quay.enabled + value: "true" + - name: registry.tlsVerify + value: "false" + - name: rhtas.enabled + value: "true" + - name: rhtpa.enabled + value: "true" + ztvp-certificates: + overrides: + - name: imagePullTrust.enabled + value: "true" + - name: imagePullTrust.registries[0] + value: "quay-registry-quay-quay-enterprise.apps.{{ $.Values.global.clusterDomain }}" diff --git a/scripts/features/registry/option-2-byo.yaml b/scripts/features/registry/option-2-byo.yaml new file mode 100644 index 00000000..873c59fd --- /dev/null +++ b/scripts/features/registry/option-2-byo.yaml @@ -0,0 +1,23 @@ +# OPTION 2: BYO/External Registry (quay.io, ghcr.io, etc.) +# Enables global.registry pointing to an external registry. +# No imagePullTrust needed (external registries use public CAs). 
+# After generating, update domain/repository/user below and set the password +# in ~/values-secret.yaml (see docs/supply-chain.md for details). +global: + registry: + enabled: true + domain: quay.io + # Placeholders auto-replaced by the generator (supply-chain defines org=ztvp, image_name=qtodo) + repository: org/image-name + user: your-username + vaultPath: "secret/data/hub/infra/registry/registry-user" + passwordVaultKey: "registry-password" + +clusterGroup: + merge_into_applications: + supply-chain: + overrides: + - name: rhtas.enabled + value: "true" + - name: rhtpa.enabled + value: "true" diff --git a/scripts/features/registry/option-3-embedded-openshift.yaml b/scripts/features/registry/option-3-embedded-openshift.yaml new file mode 100644 index 00000000..ecd10198 --- /dev/null +++ b/scripts/features/registry/option-3-embedded-openshift.yaml @@ -0,0 +1,32 @@ +# OPTION 3: Embedded OpenShift Image Registry +# Enables global.registry pointing to the built-in OpenShift image registry. +# Adds embeddedOpenShift overrides to supply-chain app. +# Adds imagePullTrust to ztvp-certificates for node-level kubelet trust. 
+global: + registry: + enabled: true + domain: "default-route-openshift-image-registry.apps.{{ .Values.global.clusterDomain }}" + # Placeholders auto-replaced by the generator (supply-chain defines org=ztvp, image_name=qtodo) + repository: org/image-name + user: _token + vaultPath: "secret/data/hub/infra/registry/registry-user" + passwordVaultKey: "registry-password" + +clusterGroup: + merge_into_applications: + supply-chain: + overrides: + - name: registry.embeddedOpenShift.ensureImageNamespaceRBAC + value: "true" + - name: registry.embeddedOpenShift.tokenRefresher.enabled + value: "true" + - name: rhtas.enabled + value: "true" + - name: rhtpa.enabled + value: "true" + ztvp-certificates: + overrides: + - name: imagePullTrust.enabled + value: "true" + - name: imagePullTrust.registries[0] + value: "default-route-openshift-image-registry.apps.{{ $.Values.global.clusterDomain }}" diff --git a/scripts/features/rhtas.yaml b/scripts/features/rhtas.yaml new file mode 100644 index 00000000..52d248e6 --- /dev/null +++ b/scripts/features/rhtas.yaml @@ -0,0 +1,38 @@ +# Red Hat Trusted Artifact Signer (RHTAS) with SPIFFE + Email issuers +# Depends on: Vault, SPIRE, Keycloak (all in base config) +clusterGroup: + namespaces: + - trusted-artifact-signer: + annotations: + argocd.argoproj.io/sync-wave: "32" + labels: + openshift.io/cluster-monitoring: "true" + + subscriptions: + rhtas-operator: + name: rhtas-operator + namespace: openshift-operators + channel: stable-v1.3 + annotations: + argocd.argoproj.io/sync-wave: "29" + catalogSource: redhat-operators + + applications: + trusted-artifact-signer: + name: trusted-artifact-signer + namespace: trusted-artifact-signer + project: hub + path: charts/rhtas-operator + annotations: + argocd.argoproj.io/sync-wave: "46" + overrides: + - name: rhtas.zeroTrust.spire.enabled + value: "true" + - name: rhtas.zeroTrust.spire.trustDomain + value: "apps.{{ $.Values.global.clusterDomain }}" + - name: rhtas.zeroTrust.spire.issuer + value: 
"https://spire-spiffe-oidc-discovery-provider.apps.{{ $.Values.global.clusterDomain }}" + - name: rhtas.zeroTrust.email.enabled + value: "true" + - name: rhtas.zeroTrust.email.issuer + value: "https://keycloak.apps.{{ $.Values.global.clusterDomain }}/realms/ztvp" diff --git a/scripts/features/rhtpa.yaml b/scripts/features/rhtpa.yaml new file mode 100644 index 00000000..68432516 --- /dev/null +++ b/scripts/features/rhtpa.yaml @@ -0,0 +1,47 @@ +# Red Hat Trusted Profile Analyzer (RHTPA) with SPIFFE Integration +# Depends on: storage (NooBaa MCG), Vault, SPIRE, Keycloak +clusterGroup: + namespaces: + - rhtpa-operator: + operatorGroup: true + targetNamespace: rhtpa-operator + annotations: + argocd.argoproj.io/sync-wave: "26" + - trusted-profile-analyzer: + annotations: + argocd.argoproj.io/sync-wave: "32" + labels: + openshift.io/cluster-monitoring: "true" + + subscriptions: + rhtpa-operator: + name: rhtpa-operator + namespace: rhtpa-operator + channel: stable-v1.1 + catalogSource: redhat-operators + annotations: + argocd.argoproj.io/sync-wave: "27" + + applications: + trusted-profile-analyzer: + name: trusted-profile-analyzer + namespace: trusted-profile-analyzer + project: hub + path: charts/rhtpa-operator + annotations: + argocd.argoproj.io/sync-wave: "41" + ignoreDifferences: + - group: batch + kind: Job + jsonPointers: + - /status + + merge_into_applications: + vault: + jwt: + roles: + - name: rhtpa + audience: rhtpa + subject: "spiffe://apps.{{ $.Values.global.clusterDomain }}/ns/trusted-profile-analyzer/sa/rhtpa" + policies: + - hub-infra-rhtpa-jwt-secret diff --git a/scripts/features/storage.yaml b/scripts/features/storage.yaml new file mode 100644 index 00000000..65dc1de5 --- /dev/null +++ b/scripts/features/storage.yaml @@ -0,0 +1,27 @@ +# ODF + NooBaa MCG: shared object storage backend +# Required for RHTPA and Quay (provides S3-compatible storage via NooBaa MCG) +clusterGroup: + namespaces: + - openshift-storage: + operatorGroup: true + targetNamespace: 
openshift-storage + annotations: + openshift.io/cluster-monitoring: "true" + argocd.argoproj.io/sync-wave: "26" + + subscriptions: + odf: + name: odf-operator + namespace: openshift-storage + channel: stable-4.20 + annotations: + argocd.argoproj.io/sync-wave: "27" + + applications: + noobaa-mcg: + name: noobaa-mcg + namespace: openshift-storage + project: hub + path: charts/noobaa-mcg + annotations: + argocd.argoproj.io/sync-wave: "36" diff --git a/scripts/features/supply-chain.yaml b/scripts/features/supply-chain.yaml new file mode 100644 index 00000000..f40d9bec --- /dev/null +++ b/scripts/features/supply-chain.yaml @@ -0,0 +1,27 @@ +# Secure Supply Chain application + vault role +# Depends on: pipelines, rhtas, rhtpa, storage (all resolved automatically) +# Requires --registry-option to select the registry backend. +clusterGroup: + applications: + supply-chain: + name: supply-chain + project: hub + path: charts/supply-chain + annotations: + argocd.argoproj.io/sync-wave: "48" + ignoreDifferences: + - kind: ServiceAccount + jqPathExpressions: + - ".imagePullSecrets[]|select(.name | contains(\"-dockercfg-\"))" + + merge_into_applications: + vault: + jwt: + roles: + - name: supply-chain + audience: supply-chain + subject: "spiffe://apps.{{ $.Values.global.clusterDomain }}/ns/{{ $.Values.global.pattern }}-hub/sa/pipeline" + policies: + - hub-supply-chain-jwt-secret + supply-chain: {} + qtodo: {} diff --git a/scripts/gen-byo-container-registry-variants.py b/scripts/gen-byo-container-registry-variants.py deleted file mode 100755 index 4a586f90..00000000 --- a/scripts/gen-byo-container-registry-variants.py +++ /dev/null @@ -1,534 +0,0 @@ -#!/usr/bin/env python3 -"""Generate values-hub.yaml variants for BYO container registry options. - -Reads the default values-hub.yaml (all supply-chain components commented out) -and produces up to 3 variants with the chosen registry option enabled: - - Option 1: Built-in Quay Registry - Option 2: BYO / External Registry (e.g. 
quay.io, ghcr.io) - Option 3: Embedded OpenShift Image Registry - -Each variant also enables the common supply-chain stack (OpenShift Pipelines, -ODF, NooBaa, RHTAS, RHTPA, and their namespaces/subscriptions/vault roles). - -Registry credentials are centralized in a single `global.registry` block at -the top of values-hub.yaml. Both the supply-chain and qtodo charts fall back -to `global.registry.*` when their local registry values are empty. - -Usage: - # Generate all 3 variants under /tmp - python3 scripts/gen-byo-container-registry-variants.py - - # Generate a single variant - python3 scripts/gen-byo-container-registry-variants.py --option 2 - - # Custom base file and output directory - python3 scripts/gen-byo-container-registry-variants.py \\ - --base my-values-hub.yaml --outdir /tmp/variants -""" - -import argparse -import os -import re -import sys - - -def uncomment_line(line): - """Remove one layer of comment: ' # foo' -> ' foo'.""" - return re.sub(r"^(\s*)# ?", r"\1", line, count=1) - - -def uncomment_lines_matching(lines, patterns): - """Uncomment individual lines matching any of the given patterns.""" - result = [] - for line in lines: - matched = False - for pat in patterns: - if re.search(pat, line): - result.append(uncomment_line(line)) - matched = True - break - if not matched: - result.append(line) - return result - - -def _uncomment_multiline_block(lines, trigger_re, body_re): - """Uncomment a contiguous block: first line matches *trigger_re*, - subsequent lines match *body_re*. 
Both the trigger and body - lines are uncommented.""" - new = [] - i = 0 - while i < len(lines): - if re.search(trigger_re, lines[i]): - while i < len(lines) and re.search(body_re, lines[i]): - new.append(uncomment_line(lines[i])) - i += 1 - continue - new.append(lines[i]) - i += 1 - return new - - -def _uncomment_until_sentinel(lines, trigger_re, sentinel_re, prev_re=None): - """Uncomment from trigger line until a sentinel (exclusive).""" - new = [] - i = 0 - while i < len(lines): - prev_ok = prev_re is None or (i > 0 and re.search(prev_re, lines[i - 1])) - if re.search(trigger_re, lines[i]) and prev_ok: - while i < len(lines): - if re.match(r"^\s*$", lines[i]): - break - if re.match(r"^\s{4}\w", lines[i]): - break - if re.search(sentinel_re, lines[i]): - break - new.append(uncomment_line(lines[i])) - i += 1 - continue - new.append(lines[i]) - i += 1 - return new - - -# --------------------------------------------------------------------------- -# Global registry block -# --------------------------------------------------------------------------- -def enable_global_registry(lines, option_num): - """Uncomment the global.registry block for the selected option. - - The base file contains three commented blocks: - # OPTION 1: Built-in Quay Registry - # global: - # registry: - # ... - # OPTION 2: ... - # global: - # registry: - # ... - # OPTION 3: ... - # global: - # registry: - # ... - - This function uncomments only the block matching option_num. 
- """ - target_header = f"# OPTION {option_num}:" - result = [] - i = 0 - while i < len(lines): - line = lines[i] - - if re.search(re.escape(target_header), line): - result.append(line) - i += 1 - while i < len(lines): - if re.match(r"^# OPTION \d+:", lines[i]): - break - if re.match(r"^$", lines[i]): - break - if re.match(r"^[^#]", lines[i]): - break - result.append(uncomment_line(lines[i])) - i += 1 - continue - - result.append(line) - i += 1 - return result - - -# --------------------------------------------------------------------------- -# Supply-chain app enabler -# --------------------------------------------------------------------------- -def enable_supply_chain_app(lines, option_num): - """Enable the supply-chain app and its option-specific overrides. - - Pass 1: strip one comment layer from all supply-chain block lines. - Pass 2: selectively uncomment option-specific and common overrides. - """ - # --- Pass 1: strip outer comment from all supply-chain lines ---------- - pass1 = [] - in_block = False - block_start = -1 - block_end = -1 - - for idx, line in enumerate(lines): - if re.search(r"# Secure Supply Chain - Uncomment to enable", line): - in_block = True - block_start = idx + 1 - pass1.append(line) - continue - if in_block and re.match(r"^\s{4}#\s*$", line): - in_block = False - block_end = idx - pass1.append(line) - continue - if in_block: - pass1.append(uncomment_line(line)) - else: - pass1.append(line) - - if block_start < 0: - return pass1 - - # --- Pass 2: selectively uncomment option overrides ------------------- - final = [] - for idx, line in enumerate(pass1): - if not (block_start <= idx < block_end): - final.append(line) - continue - - stripped = line.lstrip() - if not stripped.startswith("#"): - final.append(line) - continue - - # Always uncomment RHTAS and RHTPA flags - if re.search(r"# - name: rhtas\.enabled", line) or re.search( - r"# - name: rhtpa\.enabled", line - ): - final.append(uncomment_line(line)) - continue - if 
re.search(r"#\s+value:", line) and final: - prev = final[-1] - if "rhtas.enabled" in prev or "rhtpa.enabled" in prev: - final.append(uncomment_line(line)) - continue - - # Option 1 (Built-in Quay): uncomment quay.enabled and tlsVerify - if option_num == 1: - if re.search(r"# - name: quay\.enabled", line) or re.search( - r"# - name: registry\.tlsVerify", line - ): - final.append(uncomment_line(line)) - continue - if re.search(r"#\s+value:", line) and final: - prev = final[-1] - if "quay.enabled" in prev or "registry.tlsVerify" in prev: - final.append(uncomment_line(line)) - continue - - # Option 3 (Embedded OpenShift): uncomment ensureImageNamespaceRBAC - if option_num == 3: - if re.search(r"# - name: registry\.embeddedOpenShift", line): - final.append(uncomment_line(line)) - continue - if re.search(r"#\s+value:", line) and final: - prev = final[-1] - if "embeddedOpenShift" in prev: - final.append(uncomment_line(line)) - continue - - final.append(line) - - return final - - -# --------------------------------------------------------------------------- -# Common supply-chain components (shared by all 3 options) -# --------------------------------------------------------------------------- -def apply_common_supply_chain(lines): - """Uncomment all components common to every supply-chain option.""" - - # Namespace: openshift-pipelines - lines = uncomment_lines_matching(lines, [r"^\s*# - openshift-pipelines\s*$"]) - - # Namespace: openshift-storage - lines = _uncomment_multiline_block( - lines, - r"# - openshift-storage:", - r"#\s+(- openshift-storage:|operatorGroup:|targetNamespace:" - r"|annotations:|labels:" - r"|openshift\.io/cluster-monitoring" - r"|argocd\.argoproj\.io/sync-wave.*26)", - ) - - # Namespace: trusted-artifact-signer - lines = _uncomment_multiline_block( - lines, - r"# - trusted-artifact-signer:", - r"#\s+(- trusted-artifact-signer:" - r"|annotations:|labels:" - r"|argocd\.argoproj\.io/sync-wave.*32.*Auto-created" - 
r"|openshift\.io/cluster-monitoring)", - ) - - # Namespace: rhtpa-operator - lines = _uncomment_multiline_block( - lines, - r"# - rhtpa-operator:", - r"#\s+(- rhtpa-operator:|operatorGroup:" - r"|targetNamespace: rhtpa" - r"|annotations:" - r"|argocd\.argoproj\.io/sync-wave.*26.*Create before operator)", - ) - - # Namespace: trusted-profile-analyzer - lines = _uncomment_multiline_block( - lines, - r"# - trusted-profile-analyzer:", - r"#\s+(- trusted-profile-analyzer:" - r"|annotations:|labels:" - r"|argocd\.argoproj\.io/sync-wave.*32.*Create before RHTPA" - r"|openshift\.io/cluster-monitoring)", - ) - - # Subscription: openshift-pipelines - new = [] - i = 0 - while i < len(lines): - prev = lines[i - 1] if i > 0 else "" - if re.search(r"# openshift-pipelines:", lines[i]) and re.search( - r"Uncomment to enable OpenShift Pipelines", prev - ): - while i < len(lines) and re.search( - r"#\s*(openshift-pipelines:" - r"|name: openshift-pipelines" - r"|namespace: openshift-operators)", - lines[i], - ): - new.append(uncomment_line(lines[i])) - i += 1 - continue - new.append(lines[i]) - i += 1 - lines = new - - # Subscription: odf - lines = _uncomment_multiline_block( - lines, - r"# odf:", - r"#\s*(odf:|name: odf-operator|namespace: openshift-storage" - r"|channel: stable-4" - r"|annotations:" - r"|argocd\.argoproj\.io/sync-wave.*27.*Install after OperatorGroup)", - ) - - # Subscription: rhtas-operator - lines = _uncomment_multiline_block( - lines, - r"# rhtas-operator:", - r"#\s*(rhtas-operator:|name: rhtas-operator" - r"|namespace: openshift-operators|channel: stable-v1\.3" - r"|annotations:" - r"|argocd\.argoproj\.io/sync-wave.*29" - r"|catalogSource: redhat-operators)", - ) - - # Subscription: rhtpa-operator - new = [] - i = 0 - while i < len(lines): - prev2 = lines[i - 2] if i > 1 else "" - if re.search(r"# rhtpa-operator:", lines[i]) and re.search(r"Channel:", prev2): - while i < len(lines) and re.search( - r"#\s*(rhtpa-operator:|name: rhtpa-operator" - r"|namespace: 
rhtpa-operator" - r"|channel: stable-v1\.1" - r"|catalogSource: redhat-operators" - r"|annotations:" - r"|argocd\.argoproj\.io/sync-wave.*27" - r".*Install after OperatorGroup.*before applications)", - lines[i], - ): - new.append(uncomment_line(lines[i])) - i += 1 - continue - new.append(lines[i]) - i += 1 - lines = new - - # Vault JWT roles: rhtpa and supply-chain - lines = uncomment_lines_matching( - lines, - [ - r"#\s+- name: rhtpa\s*$", - r"#\s+audience: rhtpa", - r"#\s+subject: spiffe://.*ns/trusted-profile-analyzer", - r"#\s+policies:\s*$", - r"#\s+- hub-infra-rhtpa-jwt-secret", - r"#\s+- name: supply-chain\s*$", - r"#\s+audience: supply-chain", - r"#\s+subject: spiffe://.*sa/pipeline", - r"#\s+- hub-supply-chain-jwt-secret", - ], - ) - - # Application: noobaa-mcg - lines = _uncomment_multiline_block( - lines, - r"# noobaa-mcg:", - r"#\s*(noobaa-mcg:|name: noobaa-mcg|namespace: openshift-storage" - r"|project: hub|path: charts/noobaa-mcg|annotations:" - r"|argocd\.argoproj\.io/sync-wave.*36)", - ) - - # Application: trusted-artifact-signer - lines = _uncomment_until_sentinel( - lines, - r"# trusted-artifact-signer:", - r"# RHTPA \(Red Hat", - prev_re=r"Depends on:", - ) - - # Application: trusted-profile-analyzer - lines = _uncomment_until_sentinel( - lines, - r"# trusted-profile-analyzer:", - r"PLACEHOLDER_NEVER_MATCH", - prev_re=r"Depends on:", - ) - - return lines - - -# --------------------------------------------------------------------------- -# Per-option enablers -# --------------------------------------------------------------------------- -def enable_quay_namespace_and_sub(lines): - """Enable quay-enterprise namespace, quay-operator sub, quay-registry app.""" - - lines = _uncomment_multiline_block( - lines, - r"# - quay-enterprise:", - r"#\s+(- quay-enterprise:" - r"|annotations:|labels:" - r"|argocd\.argoproj\.io/sync-wave.*32.*Create before" - r"|openshift\.io/cluster-monitoring)", - ) - - lines = _uncomment_multiline_block( - lines, - r"# 
quay-operator:", - r"#\s*(quay-operator:|name: quay-operator" - r"|namespace: openshift-operators|channel: stable-3" - r"|annotations:" - r"|argocd\.argoproj\.io/sync-wave.*28)", - ) - - lines = _uncomment_multiline_block( - lines, - r"# quay-registry:", - r"#\s*(quay-registry:|name: quay-registry" - r"|namespace: quay-enterprise|project: hub" - r"|chart: quay|chartVersion: 0\.1|annotations:" - r"|argocd\.argoproj\.io/sync-wave.*41)", - ) - - return lines - - -def enable_image_pull_trust(lines, hostname): - """Enable imagePullTrust in ztvp-certificates overrides.""" - result = [] - for line in lines: - if re.search(r"# - name: imagePullTrust\.enabled", line): - result.append(uncomment_line(line)) - elif ( - re.search(r'#\s+value: "true"\s*$', line) - and result - and "imagePullTrust.enabled" in result[-1] - ): - result.append(uncomment_line(line)) - elif re.search(r"# - name: imagePullTrust\.registries\[0\]", line): - result.append(uncomment_line(line)) - elif ( - re.search(r"#\s+value:", line) - and result - and "imagePullTrust.registries" in result[-1] - ): - result.append(re.sub(r"#\s+value:.*", f" value: {hostname}", line)) - else: - result.append(line) - return result - - -# --------------------------------------------------------------------------- -# Top-level generator -# --------------------------------------------------------------------------- -OPTION_LABELS = { - 1: "built-in-quay-registry", - 2: "byo-external-registry", - 3: "embedded-openshift-registry", -} - - -def generate_variant(base_path, option_num, output_path): - with open(base_path) as fh: - lines = fh.readlines() - - lines = apply_common_supply_chain(lines) - lines = enable_global_registry(lines, option_num) - lines = enable_supply_chain_app(lines, option_num) - - if option_num == 1: - lines = enable_quay_namespace_and_sub(lines) - lines = enable_image_pull_trust( - lines, - "quay-registry-quay-quay-enterprise.apps." 
- "{{ $.Values.global.clusterDomain }}", - ) - - if option_num == 3: - lines = enable_image_pull_trust( - lines, - "default-route-openshift-image-registry.apps." - "{{ $.Values.global.clusterDomain }}", - ) - - with open(output_path, "w") as fh: - fh.writelines(lines) - - label = OPTION_LABELS.get(option_num, f"option-{option_num}") - print(f" Option {option_num} ({label}) -> {output_path}") - - -def main(): - parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter, - ) - parser.add_argument( - "--base", - default=None, - help="Base values-hub.yaml to read (default: /values-hub.yaml)", - ) - parser.add_argument( - "--outdir", - default=None, - help="Output directory (default: /tmp)", - ) - parser.add_argument( - "--option", - type=int, - choices=[1, 2, 3], - default=None, - help="Generate only this option (default: all 3)", - ) - args = parser.parse_args() - - repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - base = args.base or os.path.join(repo_root, "values-hub.yaml") - outdir = args.outdir or "/tmp" - - if not os.path.isfile(base): - print(f"ERROR: base file not found: {base}", file=sys.stderr) - sys.exit(1) - - os.makedirs(outdir, exist_ok=True) - - options = [args.option] if args.option else [1, 2, 3] - print(f"Base: {base}") - print(f"Output directory: {outdir}") - for opt in options: - label = OPTION_LABELS[opt] - out = os.path.join(outdir, f"values-hub-{label}.yaml") - generate_variant(base, opt, out) - - print("Done.") - - -if __name__ == "__main__": - main() diff --git a/scripts/gen-feature-variants.md b/scripts/gen-feature-variants.md new file mode 100644 index 00000000..d084a79a --- /dev/null +++ b/scripts/gen-feature-variants.md @@ -0,0 +1,105 @@ +# Feature Variant Generator + +`gen-feature-variants.py` generates `values-hub.yaml` variants by composing +declarative feature fragments. 
Features live as small YAML files under +`scripts/features/` and dependencies between them are resolved automatically. + +## Prerequisites + +* Python 3.9+ +* `ruamel.yaml` library + +## Environment Setup + +### Option A: virtualenv (recommended) + +```bash +cd layered-zero-trust +python3 -m venv .venv +source .venv/bin/activate +pip install -r scripts/requirements.txt +``` + +### Option B: system-wide pip + +```bash +pip install --user -r scripts/requirements.txt +``` + +### Option C: container (Podman / Docker) + +```bash +podman run --rm -it \ + -v "$(pwd):/work:Z" -w /work \ + python:3.12-slim \ + bash -c "pip install -r scripts/requirements.txt && \ + python3 scripts/gen-feature-variants.py --list-features" +``` + +## Usage + +```bash +# List available features and registry options +python3 scripts/gen-feature-variants.py --list-features + +# Enable a single feature (dependencies are resolved automatically) +python3 scripts/gen-feature-variants.py --features rhtpa + +# Enable multiple features +python3 scripts/gen-feature-variants.py --features rhtpa,rhtas + +# Full supply chain: pick a registry option (1, 2, or 3) +python3 scripts/gen-feature-variants.py --features supply-chain --registry-option 1 + +# Generate all three supply-chain registry variants at once +python3 scripts/gen-feature-variants.py --features supply-chain --registry-option all + +# Custom base file and output directory +python3 scripts/gen-feature-variants.py \ + --features rhtpa --base values-hub.yaml --outdir /tmp +``` + +Generated files are written to `/tmp` by default (override with `--outdir`). +The output directory is created automatically if it does not exist. + +## Registry Options (supply-chain only) + +| Option | Description | Notes | +| ------ | --------------------------- | ------------------------------------------ | +| 1 | Built-in Quay registry | Deploys Quay inside the cluster | +| 2 | BYO / external registry | Uses an external registry (e.g. 
quay.io) | +| 3 | Embedded OpenShift image registry | Uses the built-in OpenShift image registry | + +> **Note:** The registry option fragments use generic `org/image-name` +> placeholders in the `repository` field. When a feature defines `org` +> and `image_name` (the `supply-chain` feature sets them to `ztvp` and +> `qtodo`), the generator replaces both placeholders automatically, so +> the output already contains `ztvp/qtodo`. If you use a custom feature +> without these fields, replace the placeholders manually before applying +> the generated file. + +## How It Works + +1. The script reads the base `values-hub.yaml`. +2. For each requested feature it loads the matching fragment from + `scripts/features/<feature>.yaml` and merges it into the base tree. +3. `clusterGroup` sections use type-aware merge strategies: + * **namespaces**: appended only if not already present + * **subscriptions / applications**: add-if-absent + * **merge_into_applications**: deep-merge into _existing_ application + configs (e.g. adding Vault JWT roles or chart overrides) +4. Comments inside `clusterGroup.namespaces`, `clusterGroup.subscriptions`, + and `clusterGroup.applications` are stripped from the generated output to + avoid confusion from commented-out blocks mixing with merged content. + All other comments (top-level headers, `spire`, `sharedValueFiles`, + `imperative`, etc.) are preserved as-is. +5. Basic validation checks for duplicates before writing the result. + +## Adding a New Feature + +1. Create `scripts/features/<feature>.yaml` mirroring the `values-hub.yaml` + structure (namespaces, subscriptions, applications). +2. Register it in `scripts/features/features.yaml` with a description and + any `depends_on` entries. +3. If the feature needs to modify an existing application (e.g. add a Vault + JWT role), use the `merge_into_applications` key under `clusterGroup`. 
#!/usr/bin/env python3
"""Generate values-hub.yaml variants by composing declarative feature fragments.

Features are defined as small YAML files under scripts/features/ that mirror the
values-hub.yaml structure. Dependencies between features are resolved
automatically via the registry in scripts/features/features.yaml.

Prerequisites:
    pip install -r scripts/requirements.txt

Usage:
    # Single feature (auto-resolves deps: rhtpa -> storage)
    python3 scripts/gen-feature-variants.py --features rhtpa

    # Multiple features
    python3 scripts/gen-feature-variants.py --features rhtpa,rhtas

    # Full supply chain with built-in Quay (option 1)
    python3 scripts/gen-feature-variants.py --features supply-chain --registry-option 1

    # Full supply chain with BYO external registry (option 2)
    python3 scripts/gen-feature-variants.py --features supply-chain --registry-option 2

    # Full supply chain with embedded OpenShift image registry (option 3)
    python3 scripts/gen-feature-variants.py --features supply-chain --registry-option 3

    # Generate all 3 supply-chain registry variants at once
    python3 scripts/gen-feature-variants.py --features supply-chain --registry-option all

    # Custom base and output directory
    python3 scripts/gen-feature-variants.py \\
        --features rhtpa --base values-hub.yaml --outdir /tmp
"""

import argparse
import copy
import os
import sys
from collections import OrderedDict

from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap, CommentedSeq

SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
FEATURES_DIR = os.path.join(SCRIPT_DIR, "features")
# Short labels used in generated filenames for the 3 registry options.
REGISTRY_LABELS = {1: "quay", 2: "byo", 3: "embedded-openshift"}


def _fail(message):
    """Print *message* to stderr and exit with status 1.

    Centralizes the error-exit pattern that was previously repeated inline.
    """
    print(message, file=sys.stderr)
    sys.exit(1)


def load_yaml_file(path):
    """Load a YAML file with a round-trip loader that preserves quoting."""
    yaml = YAML()
    yaml.preserve_quotes = True
    with open(path) as fh:
        return yaml.load(fh)


def _strip_comments(node):
    """Recursively remove all ruamel.yaml comments from a YAML subtree."""
    if isinstance(node, CommentedMap):
        node.ca.comment = None
        node.ca.items.clear()
        if hasattr(node.ca, "end"):
            node.ca.end = None
        for v in node.values():
            _strip_comments(v)
    elif isinstance(node, CommentedSeq):
        node.ca.comment = None
        node.ca.items.clear()
        if hasattr(node.ca, "end"):
            node.ca.end = None
        for item in node:
            _strip_comments(item)


def load_feature_registry():
    """Load the feature registry and optional registry options.

    Returns:
        (feature_defs, registry_options) — the 'features' mapping is required;
        'registry_options' defaults to an empty mapping when absent.
    """
    registry_path = os.path.join(FEATURES_DIR, "features.yaml")
    data = load_yaml_file(registry_path)
    return data["features"], data.get("registry_options", {})


def resolve_dependencies(requested, feature_defs):
    """Topological sort: expand requested features with their transitive deps.

    Exits with an error on unknown feature names or dependency cycles.
    Returns feature names in dependency order (deps before dependents).
    """
    resolved = OrderedDict()
    visiting = set()

    def visit(name):
        if name in resolved:
            return
        if name not in feature_defs:
            _fail(f"ERROR: unknown feature '{name}'")
        if name in visiting:
            _fail(f"ERROR: circular dependency involving '{name}'")
        visiting.add(name)
        for dep in feature_defs[name].get("depends_on", []):
            visit(dep)
        visiting.discard(name)
        resolved[name] = True

    for feat in requested:
        visit(feat)
    return list(resolved.keys())


def _namespace_key(item):
    """Extract the unique key from a namespace list entry (string or mapping)."""
    if isinstance(item, str):
        return item
    if isinstance(item, dict):
        keys = list(item.keys())
        return keys[0] if keys else None
    return None


def _merge_namespace_lists(base_list, fragment_list):
    """Append namespace entries from fragment_list that are not already in base_list."""
    existing = {_namespace_key(item) for item in base_list}
    for item in fragment_list:
        key = _namespace_key(item)
        if key not in existing:
            base_list.append(item)
            existing.add(key)


def _deep_merge_mappings(base, overlay):
    """Recursively merge overlay into base (overlay wins for scalars).

    Nested mappings are merged key-by-key; lists are appended, not replaced.
    """
    for key in overlay:
        if (
            key in base
            and isinstance(base[key], dict)
            and isinstance(overlay[key], dict)
        ):
            _deep_merge_mappings(base[key], overlay[key])
        elif (
            key in base
            and isinstance(base[key], list)
            and isinstance(overlay[key], list)
        ):
            base[key].extend(overlay[key])
        else:
            base[key] = overlay[key]


def _apply_merge_into(base_apps, merge_into_spec):
    """Handle merge_into_applications: merge fragment data into existing app configs.

    merge_into_spec is a mapping like:
        vault:
          jwt:
            roles: [...]
        ztvp-certificates:
          overrides: [...]

    For each target app, recursively merge into the existing app config.
    Lists (roles, overrides) are appended rather than replaced.
    """
    for app_name, additions in merge_into_spec.items():
        if app_name not in base_apps:
            # Non-fatal: the target app may simply not exist in this base.
            print(
                f"WARNING: merge_into_applications target '{app_name}'"
                " not found in base applications",
                file=sys.stderr,
            )
            continue
        _deep_merge_mappings(base_apps[app_name], copy.deepcopy(additions))


def _insert_key_before(mapping, new_key, new_value, before_key):
    """Insert new_key into a ruamel.yaml CommentedMap before before_key.

    ruamel.yaml mappings are ordered; plain assignment appends at the end.
    This rebuilds the ordering so new_key appears just before before_key.
    Falls back to a plain append when before_key is absent.
    """
    if before_key not in mapping:
        mapping[new_key] = new_value
        return

    keys = list(mapping.keys())
    idx = keys.index(before_key)
    items = list(mapping.items())
    items.insert(idx, (new_key, new_value))
    for k in keys:
        del mapping[k]
    for k, v in items:
        mapping[k] = v


def merge_fragment(base, fragment):
    """Merge a single feature fragment into the base YAML tree.

    clusterGroup sections use type-aware strategies; other top-level keys
    are deep-merged when present in base, or inserted before clusterGroup
    (for readability) when new.
    """
    if fragment is None:
        return

    for top_key in fragment:
        if top_key == "clusterGroup":
            _merge_cluster_group(base, fragment["clusterGroup"])
        elif top_key in base and isinstance(base[top_key], dict):
            _deep_merge_mappings(base[top_key], copy.deepcopy(fragment[top_key]))
        elif top_key not in base:
            _insert_key_before(
                base,
                top_key,
                copy.deepcopy(fragment[top_key]),
                "clusterGroup",
            )
        else:
            base[top_key] = copy.deepcopy(fragment[top_key])


def _merge_cluster_group(base, frag_cg):
    """Merge clusterGroup sections with type-aware strategies.

    namespaces: append-if-absent; subscriptions/applications: add-if-absent;
    merge_into_applications: deep-merge into existing app configs.
    """
    base_cg = base.setdefault("clusterGroup", {})

    if "namespaces" in frag_cg:
        base_ns = base_cg.setdefault("namespaces", [])
        _merge_namespace_lists(base_ns, frag_cg["namespaces"])

    if "subscriptions" in frag_cg:
        base_subs = base_cg.setdefault("subscriptions", {})
        for sub_name, sub_val in frag_cg["subscriptions"].items():
            if sub_name not in base_subs:
                base_subs[sub_name] = copy.deepcopy(sub_val)

    if "applications" in frag_cg:
        base_apps = base_cg.setdefault("applications", {})
        for app_name, app_val in frag_cg["applications"].items():
            if app_name not in base_apps:
                base_apps[app_name] = copy.deepcopy(app_val)

    if "merge_into_applications" in frag_cg:
        base_apps = base_cg.get("applications", {})
        _apply_merge_into(base_apps, frag_cg["merge_into_applications"])


def validate_output(data):
    """Run basic sanity checks on the merged YAML tree.

    Emits warnings (never fails) for duplicate namespaces and duplicate
    vault JWT role names.
    """
    cg = data.get("clusterGroup", {})

    ns_list = cg.get("namespaces", [])
    seen = set()
    for item in ns_list:
        key = _namespace_key(item)
        if key in seen:
            print(f"WARNING: duplicate namespace '{key}'", file=sys.stderr)
        seen.add(key)

    apps = cg.get("applications", {})
    vault = apps.get("vault", {})
    jwt_roles = vault.get("jwt", {}).get("roles", [])
    role_names = set()
    for role in jwt_roles:
        name = role.get("name")
        if name in role_names:
            print(f"WARNING: duplicate vault JWT role '{name}'", file=sys.stderr)
        role_names.add(name)


def _substitute_repository_placeholders(base, org=None, image_name=None):
    """Replace 'org' and 'image-name' placeholders in global.registry.repository.

    Fix: the original unconditionally wrote base["global"]["registry"], which
    raised KeyError when the merged tree had no global.registry (and silently
    created an empty 'repository' key when only 'repository' was missing).
    Now a no-op unless global.registry.repository already exists.
    """
    registry = base.get("global", {}).get("registry")
    if not isinstance(registry, dict) or "repository" not in registry:
        return
    repo = str(registry["repository"])
    if org:
        repo = repo.replace("org/", f"{org}/", 1)
    if image_name:
        repo = repo.replace("image-name", image_name)
    registry["repository"] = repo


def generate_variant(
    base_path,
    features_dir,
    resolved_features,
    registry_fragment_path,
    output_path,
    org=None,
    image_name=None,
):
    """Load base, merge all feature fragments + registry option, write output.

    Args:
        base_path: path to the base values-hub.yaml.
        features_dir: directory holding <feature>.yaml fragments.
        resolved_features: feature names in dependency order.
        registry_fragment_path: optional registry-option fragment, or None.
        output_path: file to write the merged result to.
        org / image_name: optional placeholder substitutions for the
            global.registry.repository value.
    """
    yaml = YAML()
    yaml.preserve_quotes = True
    yaml.default_flow_style = False
    yaml.width = 4096  # avoid wrapping long lines in the output

    with open(base_path) as fh:
        base = yaml.load(fh)

    for feat_name in resolved_features:
        frag_path = os.path.join(features_dir, f"{feat_name}.yaml")
        if not os.path.isfile(frag_path):
            _fail(f"ERROR: fragment file not found: {frag_path}")
        fragment = load_yaml_file(frag_path)
        merge_fragment(base, fragment)

    if registry_fragment_path:
        if not os.path.isfile(registry_fragment_path):
            _fail(f"ERROR: registry fragment not found: {registry_fragment_path}")
        registry_frag = load_yaml_file(registry_fragment_path)
        merge_fragment(base, registry_frag)

    if org or image_name:
        _substitute_repository_placeholders(base, org=org, image_name=image_name)

    validate_output(base)

    # Strip comments only inside the merged clusterGroup sections so stale
    # commented-out blocks don't mix with merged content; all other comments
    # in the base file are preserved.
    cg = base.get("clusterGroup")
    if cg:
        for key in ("namespaces", "subscriptions", "applications"):
            if key in cg:
                _strip_comments(cg[key])

    with open(output_path, "w") as fh:
        yaml.dump(base, fh)

    print(f"  -> {output_path}")


def build_output_name(features, registry_option=None):
    """Construct the output filename from features and optional registry option."""
    if "supply-chain" in features:
        label = REGISTRY_LABELS.get(registry_option, f"option-{registry_option}")
        return f"values-hub-supply-chain-{label}.yaml"
    return f"values-hub-{'-'.join(features)}.yaml"


def _parse_registry_option(value):
    """Parse a numeric --registry-option value, exiting cleanly on bad input.

    Fix: the original called int() directly, so a value such as 'x' produced
    an uncaught ValueError traceback instead of a usage error.
    """
    try:
        return int(value)
    except ValueError:
        _fail(
            f"ERROR: invalid --registry-option '{value}' (use 1, 2, 3, or 'all')"
        )


def main():
    """CLI entry point: parse args, resolve features, generate variant(s)."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "--features",
        default=None,
        help="Comma-separated list of features to enable (e.g. rhtpa,rhtas)",
    )
    parser.add_argument(
        "--registry-option",
        default=None,
        help=(
            "Registry option for supply-chain: "
            "1=built-in Quay, "
            "2=BYO/external registry, "
            "3=embedded OpenShift image registry, "
            "'all'=generate all 3 variants"
        ),
    )
    parser.add_argument(
        "--base",
        default=None,
        help="Base values-hub.yaml to read (default: <repo-root>/values-hub.yaml)",
    )
    parser.add_argument(
        "--outdir",
        default=None,
        help="Output directory (default: /tmp)",
    )
    parser.add_argument(
        "--list-features",
        action="store_true",
        help="List available features and exit",
    )
    args = parser.parse_args()

    feature_defs, registry_opts = load_feature_registry()

    if args.list_features:
        print("Available features:")
        for name, info in feature_defs.items():
            deps = ", ".join(info.get("depends_on", [])) or "none"
            print(f"  {name:20s} - {info['description']} (deps: {deps})")
        if registry_opts:
            print("\nRegistry options (for --registry-option with supply-chain):")
            for num, info in registry_opts.items():
                print(f"  {num} = {info['label']}")
        sys.exit(0)

    if not args.features:
        parser.error("--features is required (or use --list-features)")

    repo_root = os.path.dirname(SCRIPT_DIR)
    base = args.base or os.path.join(repo_root, "values-hub.yaml")
    outdir = args.outdir or "/tmp"

    if not os.path.isfile(base):
        _fail(f"ERROR: base file not found: {base}")

    os.makedirs(outdir, exist_ok=True)

    requested = [f.strip() for f in args.features.split(",")]
    resolved = resolve_dependencies(requested, feature_defs)

    # Pick up org/image_name overrides from any resolved feature that
    # declares them (supply-chain does); last one wins.
    org = None
    image_name = None
    repo_feature = None
    for f in resolved:
        val = feature_defs.get(f, {}).get("org")
        if val:
            org = val
            repo_feature = f
        val = feature_defs.get(f, {}).get("image_name")
        if val:
            image_name = val
            repo_feature = f

    needs_registry = any(
        feature_defs.get(f, {}).get("registry_option_required") for f in resolved
    )
    if needs_registry and not args.registry_option:
        _fail(
            "ERROR: --registry-option is required when supply-chain feature is enabled "
            "(use 1, 2, 3, or 'all')"
        )

    print(f"Base:     {base}")
    print(f"Output:   {outdir}")
    print(f"Features: {' -> '.join(resolved)}")
    if args.registry_option:
        print(f"Registry: option {args.registry_option}")

    if args.registry_option == "all":
        for opt_num in [1, 2, 3]:
            opt_info = registry_opts.get(opt_num)
            if not opt_info:
                _fail(f"ERROR: no registry option {opt_num} in features.yaml")
            reg_path = os.path.join(FEATURES_DIR, opt_info["file"])
            out_name = build_output_name(requested, opt_num)
            out_path = os.path.join(outdir, out_name)
            generate_variant(
                base, FEATURES_DIR, resolved, reg_path, out_path, org, image_name
            )
    else:
        reg_path = None
        opt_num = None
        if args.registry_option:
            opt_num = _parse_registry_option(args.registry_option)
            opt_info = registry_opts.get(opt_num)
            if not opt_info:
                _fail(f"ERROR: no registry option {opt_num} in features.yaml")
            reg_path = os.path.join(FEATURES_DIR, opt_info["file"])

        out_name = build_output_name(requested, opt_num)
        out_path = os.path.join(outdir, out_name)
        generate_variant(
            base, FEATURES_DIR, resolved, reg_path, out_path, org, image_name
        )

    if args.registry_option and org and image_name:
        print(
            f"\nNote: The '{repo_feature}' feature defines org '{org}' and"
            f" image_name '{image_name}', so the\n"
            f"      generated repository has been set to"
            f" '{org}/{image_name}' automatically."
        )
    elif args.registry_option:
        print(
            "\nNote: The generated 'repository' value uses generic"
            " 'org/image-name' placeholders.\n"
            "      Replace them with the actual org and image name"
            " before applying the file."
        )

    print("Done.")


if __name__ == "__main__":
    main()