From b357850966b44575055924b4d3317d797c07c079 Mon Sep 17 00:00:00 2001 From: Anik Bhattacharjee Date: Tue, 2 Aug 2022 12:25:13 -0400 Subject: [PATCH 1/4] (psa) make workloads compatible with psa:restricted profile (#2820) * (psa) make workloads compatible with psa:restricted profile With the introduction of [Pod Security Admission](https://kubernetes.io/docs/concepts/security/pod-security-admission/#pod-security-admission-labels-for-namespaces), the recommended best practice is to enforce the Restricted policy of admission (see [1] for more details). This PR *) Labels the olm namespace as `enforce:restricted` *) Labels the operators namespace as `enforce:baseline` (to allow existing CSV deployments without securityContext set to deploy in the namespace, which won't be possible with `enforce:restricted`) *) updates the securityContext of olm workload pods (olm-operator, catalog-operator, and CatalogSource registry pods) to adhere to the `Restricted` policy. *) updates the bundle unpacking job to create a pod that adheres to the `Restricted` policy, so that bundles can be unpacked in the `Restricted` namespace. Signed-off-by: Anik Bhattacharjee * (flaky test fix): GC CSV with wrong namespace annotation The test was modifying the `olm.operatornamespace` to an incorrect value, and checking to make sure that the CSV was garbage collected as a result. However, the olm-controller was copying a fresh copy back into the namespace, so whenever the test was able to get a yes reply to the question "is the CSV gone", in the brief window before it was copied back again, the test was passing. This commit fixes that by making sure that if we find a CSV that we expected to be garbage collected, it passes if it determines that the CSV is a fresh copy, and not the one modified before. 
Signed-off-by: Anik Bhattacharjee Upstream-commit: 67177c0c822fbe7d554669262c6b4f54bebad17f Upstream-repository: operator-lifecycle-manager --- staging/operator-lifecycle-manager/Dockerfile | 1 + .../Dockerfile.goreleaser | 1 + .../cmd/catalog/main.go | 6 + .../cmd/catalog/start.go | 2 + .../templates/0000_50_olm_00-namespace.yaml | 6 + ...000_50_olm_07-olm-operator.deployment.yaml | 8 + ...50_olm_08-catalog-operator.deployment.yaml | 10 ++ .../_packageserver.deployment-spec.yaml | 12 +- .../operator-lifecycle-manager/e2e.Dockerfile | 1 + .../pkg/controller/bundle/bundle_unpacker.go | 36 ++++- .../controller/bundle/bundle_unpacker_test.go | 153 ++++++++++++++++++ .../controller/operators/catalog/operator.go | 12 +- .../operators/catalog/operator_test.go | 4 +- .../registry/reconciler/configmap.go | 16 +- .../registry/reconciler/configmap_test.go | 8 +- .../controller/registry/reconciler/grpc.go | 16 +- .../registry/reconciler/grpc_test.go | 4 +- .../registry/reconciler/reconciler.go | 42 +++-- .../registry/reconciler/reconciler_test.go | 19 ++- .../test/e2e/operator_groups_e2e_test.go | 43 +++-- .../cmd/catalog/main.go | 6 + .../cmd/catalog/start.go | 2 + .../pkg/controller/bundle/bundle_unpacker.go | 36 ++++- .../controller/operators/catalog/operator.go | 12 +- .../registry/reconciler/configmap.go | 16 +- .../controller/registry/reconciler/grpc.go | 16 +- .../registry/reconciler/reconciler.go | 42 +++-- 27 files changed, 424 insertions(+), 106 deletions(-) diff --git a/staging/operator-lifecycle-manager/Dockerfile b/staging/operator-lifecycle-manager/Dockerfile index 961037796a..b28073b651 100644 --- a/staging/operator-lifecycle-manager/Dockerfile +++ b/staging/operator-lifecycle-manager/Dockerfile @@ -33,6 +33,7 @@ COPY --from=builder /build/bin/olm /bin/olm COPY --from=builder /build/bin/catalog /bin/catalog COPY --from=builder /build/bin/package-server /bin/package-server COPY --from=builder /build/bin/cpb /bin/cpb +USER 1001 EXPOSE 8080 EXPOSE 5443 CMD 
["/bin/olm"] diff --git a/staging/operator-lifecycle-manager/Dockerfile.goreleaser b/staging/operator-lifecycle-manager/Dockerfile.goreleaser index 6a3238d015..a117f6623e 100644 --- a/staging/operator-lifecycle-manager/Dockerfile.goreleaser +++ b/staging/operator-lifecycle-manager/Dockerfile.goreleaser @@ -10,4 +10,5 @@ COPY package-server /bin/package-server COPY cpb /bin/cpb EXPOSE 8080 EXPOSE 5443 +USER 1001 ENTRYPOINT ["/bin/olm"] diff --git a/staging/operator-lifecycle-manager/cmd/catalog/main.go b/staging/operator-lifecycle-manager/cmd/catalog/main.go index ead86cd763..20134d71d2 100644 --- a/staging/operator-lifecycle-manager/cmd/catalog/main.go +++ b/staging/operator-lifecycle-manager/cmd/catalog/main.go @@ -30,6 +30,7 @@ const ( defaultOPMImage = "quay.io/operator-framework/upstream-opm-builder:latest" defaultUtilImage = "quay.io/operator-framework/olm:latest" defaultOperatorName = "" + defaultWorkLoadUserID = int64(1001) ) // config flags defined globally so that they appear on the test binary as well @@ -83,6 +84,10 @@ func (o *options) run(ctx context.Context, logger *logrus.Logger) error { return fmt.Errorf("error configuring client: %s", err.Error()) } + workloadUserID := int64(-1) + if o.setWorkloadUserID { + workloadUserID = defaultWorkLoadUserID + } // TODO(tflannag): Use options pattern for catalog operator // Create a new instance of the operator. 
op, err := catalog.NewOperator( @@ -98,6 +103,7 @@ func (o *options) run(ctx context.Context, logger *logrus.Logger) error { k8sscheme.Scheme, o.installPlanTimeout, o.bundleUnpackTimeout, + workloadUserID, ) if err != nil { return fmt.Errorf("error configuring catalog operator: %s", err.Error()) diff --git a/staging/operator-lifecycle-manager/cmd/catalog/start.go b/staging/operator-lifecycle-manager/cmd/catalog/start.go index 45a0953c08..7161131880 100644 --- a/staging/operator-lifecycle-manager/cmd/catalog/start.go +++ b/staging/operator-lifecycle-manager/cmd/catalog/start.go @@ -25,6 +25,7 @@ type options struct { tlsKeyPath string tlsCertPath string clientCAPath string + setWorkloadUserID bool installPlanTimeout time.Duration bundleUnpackTimeout time.Duration @@ -66,6 +67,7 @@ func newRootCmd() *cobra.Command { cmd.Flags().StringVar(&o.opmImage, "opmImage", defaultOPMImage, "the image to use for unpacking bundle content with opm") cmd.Flags().StringVar(&o.utilImage, "util-image", defaultUtilImage, "an image containing custom olm utilities") cmd.Flags().StringVar(&o.writeStatusName, "writeStatusName", defaultOperatorName, "ClusterOperator name in which to write status, set to \"\" to disable.") + cmd.Flags().BoolVar(&o.setWorkloadUserID, "set-workload-user-id", false, "set user ID for all workloads (registry pods/bundle unpack jobs to default 1001") cmd.Flags().BoolVar(&o.debug, "debug", false, "use debug log level") cmd.Flags().BoolVar(&o.version, "version", false, "displays the olm version") diff --git a/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_00-namespace.yaml b/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_00-namespace.yaml index d42268a59b..6575fb2ea4 100644 --- a/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_00-namespace.yaml +++ b/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_00-namespace.yaml @@ -2,9 +2,15 @@ apiVersion: v1 kind: Namespace metadata: name: 
{{ .Values.namespace }} + labels: + pod-security.kubernetes.io/enforce: restricted + pod-security.kubernetes.io/enforce-version: latest --- apiVersion: v1 kind: Namespace metadata: name: {{ .Values.operator_namespace }} + labels: + pod-security.kubernetes.io/enforce: baseline + pod-security.kubernetes.io/enforce-version: latest diff --git a/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_07-olm-operator.deployment.yaml b/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_07-olm-operator.deployment.yaml index b71fbebd0d..825d39f0df 100644 --- a/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_07-olm-operator.deployment.yaml +++ b/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_07-olm-operator.deployment.yaml @@ -17,6 +17,10 @@ spec: labels: app: olm-operator spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount {{- if or .Values.olm.tlsSecret .Values.olm.clientCASecret }} volumes: @@ -33,6 +37,10 @@ spec: {{- end }} containers: - name: olm-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] {{- if or .Values.olm.tlsSecret .Values.olm.clientCASecret }} volumeMounts: {{- end }} diff --git a/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml b/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml index e71ffb38b1..f3fbe99993 100644 --- a/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml +++ b/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml @@ -17,6 +17,10 @@ spec: labels: app: catalog-operator spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount {{- if or 
.Values.catalog.tlsSecret .Values.catalog.clientCASecret }} volumes: @@ -33,6 +37,10 @@ spec: {{- end }} containers: - name: catalog-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] {{- if or .Values.catalog.tlsSecret .Values.catalog.clientCASecret }} volumeMounts: {{- end }} @@ -76,6 +84,8 @@ spec: - --client-ca - /profile-collector-cert/tls.crt {{- end }} + - --set-workload-user-id + - "true" image: {{ .Values.catalog.image.ref }} imagePullPolicy: {{ .Values.catalog.image.pullPolicy }} ports: diff --git a/staging/operator-lifecycle-manager/deploy/chart/templates/_packageserver.deployment-spec.yaml b/staging/operator-lifecycle-manager/deploy/chart/templates/_packageserver.deployment-spec.yaml index 6121a85bb5..75cdc5b508 100644 --- a/staging/operator-lifecycle-manager/deploy/chart/templates/_packageserver.deployment-spec.yaml +++ b/staging/operator-lifecycle-manager/deploy/chart/templates/_packageserver.deployment-spec.yaml @@ -14,6 +14,10 @@ spec: labels: app: packageserver spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount {{- if .Values.package.nodeSelector }} nodeSelector: @@ -25,6 +29,10 @@ spec: {{- end }} containers: - name: packageserver + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] command: - /bin/package-server - -v=4 @@ -61,10 +69,6 @@ spec: resources: {{ toYaml .Values.package.resources | indent 10 }} {{- end }} - {{- if .Values.package.securityContext }} - securityContext: - runAsUser: {{ .Values.package.securityContext.runAsUser }} - {{- end }} volumeMounts: - name: tmpfs mountPath: /tmp diff --git a/staging/operator-lifecycle-manager/e2e.Dockerfile b/staging/operator-lifecycle-manager/e2e.Dockerfile index 6428974668..4f450f2d9a 100644 --- a/staging/operator-lifecycle-manager/e2e.Dockerfile +++ b/staging/operator-lifecycle-manager/e2e.Dockerfile @@ -3,4 +3,5 @@ FROM busybox COPY 
olm catalog package-server wait cpb /bin/ EXPOSE 8080 EXPOSE 5443 +USER 1001 CMD ["/bin/olm"] diff --git a/staging/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go b/staging/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go index 55665aada6..bf797ba1fd 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go +++ b/staging/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go @@ -22,6 +22,7 @@ import ( listersbatchv1 "k8s.io/client-go/listers/batch/v1" listerscorev1 "k8s.io/client-go/listers/core/v1" listersrbacv1 "k8s.io/client-go/listers/rbac/v1" + "k8s.io/utils/pointer" "github.com/operator-framework/api/pkg/operators/reference" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" @@ -101,6 +102,11 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string // See: https://kubernetes.io/docs/concepts/workloads/controllers/job/#pod-backoff-failure-policy RestartPolicy: corev1.RestartPolicyNever, ImagePullSecrets: secrets, + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -129,6 +135,12 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -148,6 +160,12 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", 
@@ -170,6 +188,12 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -193,7 +217,10 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string job.SetNamespace(cmRef.Namespace) job.SetName(cmRef.Name) job.SetOwnerReferences([]metav1.OwnerReference{ownerRef(cmRef)}) - + if c.runAsUser > 0 { + job.Spec.Template.Spec.SecurityContext.RunAsUser = &c.runAsUser + job.Spec.Template.Spec.SecurityContext.RunAsNonRoot = pointer.Bool(true) + } // By default the BackoffLimit is set to 6 which with exponential backoff 10s + 20s + 40s ... // translates to ~10m of waiting time. // We want to fail faster than that when we have repeated failures from the bundle unpack pod @@ -246,6 +273,7 @@ type ConfigMapUnpacker struct { loader *configmap.BundleLoader now func() metav1.Time unpackTimeout time.Duration + runAsUser int64 } type ConfigMapUnpackerOption func(*ConfigMapUnpacker) @@ -335,6 +363,12 @@ func WithNow(now func() metav1.Time) ConfigMapUnpackerOption { } } +func WithUserID(id int64) ConfigMapUnpackerOption { + return func(unpacker *ConfigMapUnpacker) { + unpacker.runAsUser = id + } +} + func (c *ConfigMapUnpacker) apply(options ...ConfigMapUnpackerOption) { for _, option := range options { option(c) diff --git a/staging/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker_test.go b/staging/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker_test.go index 4e580ffc36..85fe2bbf62 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker_test.go +++ b/staging/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker_test.go @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" 
"k8s.io/client-go/informers" k8sfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/utils/pointer" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" crfake "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake" @@ -32,6 +33,7 @@ const ( opmImage = "opm-image" utilImage = "util-image" bundlePath = "bundle-path" + runAsUser = 1001 ) func TestConfigMapUnpacker(t *testing.T) { @@ -220,6 +222,13 @@ func TestConfigMapUnpacker(t *testing.T) { Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, ImagePullSecrets: []corev1.LocalObjectReference{{Name: "my-secret"}}, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -243,6 +252,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -262,6 +277,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -284,6 +305,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -397,6 +424,13 @@ func TestConfigMapUnpacker(t *testing.T) { }, Spec: corev1.PodSpec{ 
RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -420,6 +454,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -439,6 +479,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -461,6 +507,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -615,6 +667,13 @@ func TestConfigMapUnpacker(t *testing.T) { }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -638,6 +697,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + 
}, + }, }, }, InitContainers: []corev1.Container{ @@ -657,6 +722,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -679,6 +750,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -827,6 +904,13 @@ func TestConfigMapUnpacker(t *testing.T) { }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -850,6 +934,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -869,6 +959,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -891,6 +987,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: 
&corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -1009,6 +1111,13 @@ func TestConfigMapUnpacker(t *testing.T) { }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -1032,6 +1141,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -1051,6 +1166,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -1073,6 +1194,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -1202,6 +1329,13 @@ func TestConfigMapUnpacker(t *testing.T) { }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -1225,6 +1359,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: 
resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -1244,6 +1384,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -1266,6 +1412,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -1341,6 +1493,7 @@ func TestConfigMapUnpacker(t *testing.T) { WithUtilImage(utilImage), WithNow(now), WithUnpackTimeout(defaultUnpackDuration), + WithUserID(int64(runAsUser)), ) require.NoError(t, err) diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go index 658cd93a34..536d51bece 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go @@ -125,7 +125,7 @@ type Operator struct { type CatalogSourceSyncFunc func(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) // NewOperator creates a new Catalog Operator. 
-func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clock, logger *logrus.Logger, resync time.Duration, configmapRegistryImage, opmImage, utilImage string, operatorNamespace string, scheme *runtime.Scheme, installPlanTimeout time.Duration, bundleUnpackTimeout time.Duration) (*Operator, error) { +func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clock, logger *logrus.Logger, resync time.Duration, configmapRegistryImage, opmImage, utilImage string, operatorNamespace string, scheme *runtime.Scheme, installPlanTimeout time.Duration, bundleUnpackTimeout time.Duration, workloadUserID int64) (*Operator, error) { resyncPeriod := queueinformer.ResyncWithJitter(resync, 0.2) config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) if err != nil { @@ -194,7 +194,7 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo op.sources = grpc.NewSourceStore(logger, 10*time.Second, 10*time.Minute, op.syncSourceState) op.sourceInvalidator = resolver.SourceProviderFromRegistryClientProvider(op.sources, logger) resolverSourceProvider := NewOperatorGroupToggleSourceProvider(op.sourceInvalidator, logger, op.lister.OperatorsV1().OperatorGroupLister()) - op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, opClient, configmapRegistryImage, op.now, ssaClient) + op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, opClient, configmapRegistryImage, op.now, ssaClient, workloadUserID) res := resolver.NewOperatorStepResolver(lister, crClient, operatorNamespace, resolverSourceProvider, logger) op.resolver = resolver.NewInstrumentedResolver(res, metrics.RegisterDependencyResolutionSuccess, metrics.RegisterDependencyResolutionFailure) @@ -433,6 +433,7 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo bundle.WithUtilImage(utilImage), bundle.WithNow(op.now), bundle.WithUnpackTimeout(op.bundleUnpackTimeout), + bundle.WithUserID(workloadUserID), ) if err != 
nil { return nil, err @@ -1404,13 +1405,6 @@ type UnpackedBundleReference struct { Properties string `json:"properties"` } -/* unpackBundles makes one walk through the bundlelookups and attempts to progress them -Returns: - unpacked: bool - If the bundle was successfully unpacked - out: *v1alpha1.InstallPlan - the resulting installPlan - error: error -*/ - func (o *Operator) unpackBundles(plan *v1alpha1.InstallPlan) (bool, *v1alpha1.InstallPlan, error) { out := plan.DeepCopy() unpacked := true diff --git a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator_test.go b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator_test.go index b7e2431480..51b07250c7 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator_test.go +++ b/staging/operator-lifecycle-manager/pkg/controller/operators/catalog/operator_test.go @@ -1606,7 +1606,7 @@ func NewFakeOperator(ctx context.Context, namespace string, namespaces []string, } applier := controllerclient.NewFakeApplier(s, "testowner") - op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, op.opClient, "test:pod", op.now, applier) + op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, op.opClient, "test:pod", op.now, applier, 1001) } op.RunInformers(ctx) @@ -1758,7 +1758,7 @@ func toManifest(t *testing.T, obj runtime.Object) string { } func pod(s v1alpha1.CatalogSource) *corev1.Pod { - pod := reconciler.Pod(&s, "registry-server", s.Spec.Image, s.GetName(), s.GetLabels(), s.GetAnnotations(), 5, 10) + pod := reconciler.Pod(&s, "registry-server", s.Spec.Image, s.GetName(), s.GetLabels(), s.GetAnnotations(), 5, 10, 1001) ownerutil.AddOwner(pod, &s, false, false) return pod } diff --git a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap.go b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap.go index 0f8518d4c5..f264d1bee3 100644 --- 
a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap.go +++ b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap.go @@ -23,6 +23,7 @@ import ( // configMapCatalogSourceDecorator wraps CatalogSource to add additional methods type configMapCatalogSourceDecorator struct { *v1alpha1.CatalogSource + runAsUser int64 } const ( @@ -101,7 +102,7 @@ func (s *configMapCatalogSourceDecorator) Service() *corev1.Service { } func (s *configMapCatalogSourceDecorator) Pod(image string) *corev1.Pod { - pod := Pod(s.CatalogSource, "configmap-registry-server", image, "", s.Labels(), s.Annotations(), 5, 5) + pod := Pod(s.CatalogSource, "configmap-registry-server", image, "", s.Labels(), s.Annotations(), 5, 5, s.runAsUser) pod.Spec.ServiceAccountName = s.GetName() + ConfigMapServerPostfix pod.Spec.Containers[0].Command = []string{"configmap-server", "-c", s.Spec.ConfigMap, "-n", s.GetNamespace()} ownerutil.AddOwner(pod, s.CatalogSource, false, false) @@ -162,10 +163,11 @@ func (s *configMapCatalogSourceDecorator) RoleBinding() *rbacv1.RoleBinding { } type ConfigMapRegistryReconciler struct { - now nowFunc - Lister operatorlister.OperatorLister - OpClient operatorclient.ClientInterface - Image string + now nowFunc + Lister operatorlister.OperatorLister + OpClient operatorclient.ClientInterface + Image string + createPodAsUser int64 } var _ RegistryEnsurer = &ConfigMapRegistryReconciler{} @@ -240,7 +242,7 @@ func (c *ConfigMapRegistryReconciler) currentPodsWithCorrectResourceVersion(sour // EnsureRegistryServer ensures that all components of registry server are up to date. 
func (c *ConfigMapRegistryReconciler) EnsureRegistryServer(catalogSource *v1alpha1.CatalogSource) error { - source := configMapCatalogSourceDecorator{catalogSource} + source := configMapCatalogSourceDecorator{catalogSource, c.createPodAsUser} image := c.Image if source.Spec.SourceType == "grpc" { @@ -389,7 +391,7 @@ func (c *ConfigMapRegistryReconciler) ensureService(source configMapCatalogSourc // CheckRegistryServer returns true if the given CatalogSource is considered healthy; false otherwise. func (c *ConfigMapRegistryReconciler) CheckRegistryServer(catalogSource *v1alpha1.CatalogSource) (healthy bool, err error) { - source := configMapCatalogSourceDecorator{catalogSource} + source := configMapCatalogSourceDecorator{catalogSource, c.createPodAsUser} image := c.Image if source.Spec.SourceType == "grpc" { diff --git a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap_test.go b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap_test.go index ddd0218661..281217a1bf 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap_test.go +++ b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap_test.go @@ -30,6 +30,7 @@ import ( const ( registryImageName = "test:image" + runAsUser = 1001 testNamespace = "testns" ) @@ -104,6 +105,7 @@ func fakeReconcilerFactory(t *testing.T, stopc <-chan struct{}, options ...fakeR OpClient: opClientFake, Lister: lister, ConfigMapServerImage: config.configMapServerImage, + createPodAsUser: runAsUser, } var hasSyncedCheckFns []cache.InformerSynced @@ -186,7 +188,7 @@ func objectsForCatalogSource(catsrc *v1alpha1.CatalogSource) []runtime.Object { var objs []runtime.Object switch catsrc.Spec.SourceType { case v1alpha1.SourceTypeInternal, v1alpha1.SourceTypeConfigmap: - decorated := configMapCatalogSourceDecorator{catsrc} + decorated := configMapCatalogSourceDecorator{catsrc, runAsUser} objs = 
clientfake.AddSimpleGeneratedNames( clientfake.AddSimpleGeneratedName(decorated.Pod(registryImageName)), decorated.Service(), @@ -196,7 +198,7 @@ func objectsForCatalogSource(catsrc *v1alpha1.CatalogSource) []runtime.Object { ) case v1alpha1.SourceTypeGrpc: if catsrc.Spec.Image != "" { - decorated := grpcCatalogSourceDecorator{catsrc} + decorated := grpcCatalogSourceDecorator{catsrc, runAsUser} objs = clientfake.AddSimpleGeneratedNames( decorated.Pod(catsrc.GetName()), decorated.Service(), @@ -451,7 +453,7 @@ func TestConfigMapRegistryReconciler(t *testing.T) { } // if no error, the reconciler should create the same set of kube objects every time - decorated := configMapCatalogSourceDecorator{tt.in.catsrc} + decorated := configMapCatalogSourceDecorator{tt.in.catsrc, runAsUser} pod := decorated.Pod(registryImageName) listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{CatalogSourceLabelKey: tt.in.catsrc.GetName()}).String()} diff --git a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc.go b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc.go index 4f63cc87aa..93e28e6286 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc.go +++ b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc.go @@ -32,6 +32,7 @@ const ( // grpcCatalogSourceDecorator wraps CatalogSource to add additional methods type grpcCatalogSourceDecorator struct { *v1alpha1.CatalogSource + createPodAsUser int64 } type UpdateNotReadyErr struct { @@ -122,16 +123,17 @@ func (s *grpcCatalogSourceDecorator) ServiceAccount() *corev1.ServiceAccount { } func (s *grpcCatalogSourceDecorator) Pod(saName string) *corev1.Pod { - pod := Pod(s.CatalogSource, "registry-server", s.Spec.Image, saName, s.Labels(), s.Annotations(), 5, 10) + pod := Pod(s.CatalogSource, "registry-server", s.Spec.Image, saName, s.Labels(), s.Annotations(), 5, 10, s.createPodAsUser) ownerutil.AddOwner(pod, 
s.CatalogSource, false, false) return pod } type GrpcRegistryReconciler struct { - now nowFunc - Lister operatorlister.OperatorLister - OpClient operatorclient.ClientInterface - SSAClient *controllerclient.ServerSideApplier + now nowFunc + Lister operatorlister.OperatorLister + OpClient operatorclient.ClientInterface + SSAClient *controllerclient.ServerSideApplier + createPodAsUser int64 } var _ RegistryReconciler = &GrpcRegistryReconciler{} @@ -198,7 +200,7 @@ func (c *GrpcRegistryReconciler) currentPodsWithCorrectImageAndSpec(source grpcC // EnsureRegistryServer ensures that all components of registry server are up to date. func (c *GrpcRegistryReconciler) EnsureRegistryServer(catalogSource *v1alpha1.CatalogSource) error { - source := grpcCatalogSourceDecorator{catalogSource} + source := grpcCatalogSourceDecorator{catalogSource, c.createPodAsUser} // if service status is nil, we force create every object to ensure they're created the first time overwrite := source.Status.RegistryServiceStatus == nil || !isRegistryServiceStatusValid(&source) @@ -447,7 +449,7 @@ func (c *GrpcRegistryReconciler) removePods(pods []*corev1.Pod, namespace string // CheckRegistryServer returns true if the given CatalogSource is considered healthy; false otherwise. 
func (c *GrpcRegistryReconciler) CheckRegistryServer(catalogSource *v1alpha1.CatalogSource) (healthy bool, err error) { - source := grpcCatalogSourceDecorator{catalogSource} + source := grpcCatalogSourceDecorator{catalogSource, c.createPodAsUser} // Check on registry resources // TODO: add gRPC health check if len(c.currentPodsWithCorrectImageAndSpec(source, source.ServiceAccount().GetName())) < 1 || diff --git a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc_test.go b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc_test.go index 42a2bde3ad..c219329b3d 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc_test.go +++ b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc_test.go @@ -331,7 +331,7 @@ func TestGrpcRegistryReconciler(t *testing.T) { } // Check for resource existence - decorated := grpcCatalogSourceDecorator{tt.in.catsrc} + decorated := grpcCatalogSourceDecorator{tt.in.catsrc, runAsUser} pod := decorated.Pod(tt.in.catsrc.GetName()) service := decorated.Service() sa := decorated.ServiceAccount() @@ -421,7 +421,7 @@ func TestRegistryPodPriorityClass(t *testing.T) { require.NoError(t, err) // Check for resource existence - decorated := grpcCatalogSourceDecorator{tt.in.catsrc} + decorated := grpcCatalogSourceDecorator{tt.in.catsrc, runAsUser} pod := decorated.Pod(tt.in.catsrc.GetName()) listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{CatalogSourceLabelKey: tt.in.catsrc.GetName()}).String()} outPods, podErr := client.KubernetesInterface().CoreV1().Pods(pod.GetNamespace()).List(context.TODO(), listOptions) diff --git a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler.go b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler.go index 6089d2e777..45bf8dcba4 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler.go +++ 
b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler.go @@ -10,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/utils/pointer" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" controllerclient "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client" @@ -61,6 +62,7 @@ type registryReconcilerFactory struct { OpClient operatorclient.ClientInterface ConfigMapServerImage string SSAClient *controllerclient.ServerSideApplier + createPodAsUser int64 } // ReconcilerForSource returns a RegistryReconciler based on the configuration of the given CatalogSource. @@ -69,18 +71,20 @@ func (r *registryReconcilerFactory) ReconcilerForSource(source *operatorsv1alpha switch source.Spec.SourceType { case operatorsv1alpha1.SourceTypeInternal, operatorsv1alpha1.SourceTypeConfigmap: return &ConfigMapRegistryReconciler{ - now: r.now, - Lister: r.Lister, - OpClient: r.OpClient, - Image: r.ConfigMapServerImage, + now: r.now, + Lister: r.Lister, + OpClient: r.OpClient, + Image: r.ConfigMapServerImage, + createPodAsUser: r.createPodAsUser, } case operatorsv1alpha1.SourceTypeGrpc: if source.Spec.Image != "" { return &GrpcRegistryReconciler{ - now: r.now, - Lister: r.Lister, - OpClient: r.OpClient, - SSAClient: r.SSAClient, + now: r.now, + Lister: r.Lister, + OpClient: r.OpClient, + SSAClient: r.SSAClient, + createPodAsUser: r.createPodAsUser, } } else if source.Spec.Address != "" { return &GrpcAddressRegistryReconciler{ @@ -92,17 +96,18 @@ func (r *registryReconcilerFactory) ReconcilerForSource(source *operatorsv1alpha } // NewRegistryReconcilerFactory returns an initialized RegistryReconcilerFactory. 
-func NewRegistryReconcilerFactory(lister operatorlister.OperatorLister, opClient operatorclient.ClientInterface, configMapServerImage string, now nowFunc, ssaClient *controllerclient.ServerSideApplier) RegistryReconcilerFactory { +func NewRegistryReconcilerFactory(lister operatorlister.OperatorLister, opClient operatorclient.ClientInterface, configMapServerImage string, now nowFunc, ssaClient *controllerclient.ServerSideApplier, createPodAsUser int64) RegistryReconcilerFactory { return ®istryReconcilerFactory{ now: now, Lister: lister, OpClient: opClient, ConfigMapServerImage: configMapServerImage, SSAClient: ssaClient, + createPodAsUser: createPodAsUser, } } -func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saName string, labels map[string]string, annotations map[string]string, readinessDelay int32, livenessDelay int32) *corev1.Pod { +func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saName string, labels map[string]string, annotations map[string]string, readinessDelay int32, livenessDelay int32, runAsUser int64) *corev1.Pod { // Ensure the catalog image is always pulled if the image is not based on a digest, measured by whether an "@" is included. // See https://github.com/docker/distribution/blob/master/reference/reference.go for more info. // This means recreating non-digest based catalog pods will result in the latest version of the catalog content being delivered on-cluster. 
@@ -125,8 +130,6 @@ func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saN podAnnotations[key] = value } - readOnlyRootFilesystem := false - pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: source.GetName() + "-", @@ -179,7 +182,11 @@ func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saN }, }, SecurityContext: &corev1.SecurityContext{ - ReadOnlyRootFilesystem: &readOnlyRootFilesystem, + ReadOnlyRootFilesystem: pointer.Bool(false), + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, }, ImagePullPolicy: pullPolicy, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, @@ -188,10 +195,19 @@ func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saN NodeSelector: map[string]string{ "kubernetes.io/os": "linux", }, + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, ServiceAccountName: saName, }, } + if runAsUser > 0 { + pod.Spec.SecurityContext.RunAsUser = &runAsUser + pod.Spec.SecurityContext.RunAsNonRoot = pointer.Bool(true) + } // Override scheduling options if specified if source.Spec.GrpcPodConfig != nil { grpcPodConfig := source.Spec.GrpcPodConfig diff --git a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler_test.go b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler_test.go index ad9c82c6de..1ed7eed2fa 100644 --- a/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler_test.go +++ b/staging/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler_test.go @@ -10,6 +10,8 @@ import ( "github.com/operator-framework/api/pkg/operators/v1alpha1" ) +const workloadUserID = 1001 + func TestPodNodeSelector(t *testing.T) { catsrc := &v1alpha1.CatalogSource{ ObjectMeta: metav1.ObjectMeta{ @@ 
-21,7 +23,7 @@ func TestPodNodeSelector(t *testing.T) { key := "kubernetes.io/os" value := "linux" - gotCatSrcPod := Pod(catsrc, "hello", "busybox", "", map[string]string{}, map[string]string{}, int32(0), int32(0)) + gotCatSrcPod := Pod(catsrc, "hello", "busybox", "", map[string]string{}, map[string]string{}, int32(0), int32(0), int64(workloadUserID)) gotCatSrcPodSelector := gotCatSrcPod.Spec.NodeSelector if gotCatSrcPodSelector[key] != value { @@ -69,7 +71,7 @@ func TestPullPolicy(t *testing.T) { } for _, tt := range table { - p := Pod(source, "catalog", tt.image, "", nil, nil, int32(0), int32(0)) + p := Pod(source, "catalog", tt.image, "", nil, nil, int32(0), int32(0), int64(workloadUserID)) policy := p.Spec.Containers[0].ImagePullPolicy if policy != tt.policy { t.Fatalf("expected pull policy %s for image %s", tt.policy, tt.image) @@ -79,8 +81,13 @@ func TestPullPolicy(t *testing.T) { func TestPodContainerSecurityContext(t *testing.T) { expectedReadOnlyRootFilesystem := false + allowPrivilegeEscalation := false expectedContainerSecCtx := &corev1.SecurityContext{ - ReadOnlyRootFilesystem: &expectedReadOnlyRootFilesystem, + ReadOnlyRootFilesystem: &expectedReadOnlyRootFilesystem, + AllowPrivilegeEscalation: &allowPrivilegeEscalation, + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, } catsrc := &v1alpha1.CatalogSource{ @@ -90,7 +97,7 @@ func TestPodContainerSecurityContext(t *testing.T) { }, } - gotPod := Pod(catsrc, "hello", "busybox", "", map[string]string{}, map[string]string{}, int32(0), int32(0)) + gotPod := Pod(catsrc, "hello", "busybox", "", map[string]string{}, map[string]string{}, int32(0), int32(0), int64(workloadUserID)) gotContainerSecCtx := gotPod.Spec.Containers[0].SecurityContext require.Equal(t, expectedContainerSecCtx, gotContainerSecCtx) } @@ -115,7 +122,7 @@ func TestPodAvoidsConcurrentWrite(t *testing.T) { "annotation": "somethingelse", } - gotPod := Pod(catsrc, "hello", "busybox", "", labels, annotations, int32(0), 
int32(0)) + gotPod := Pod(catsrc, "hello", "busybox", "", labels, annotations, int32(0), int32(0), int64(workloadUserID)) // check labels and annotations point to different addresses between parameters and what's in the pod require.NotEqual(t, &labels, &gotPod.Labels) @@ -295,7 +302,7 @@ func TestPodSchedulingOverrides(t *testing.T) { } for _, testCase := range testCases { - pod := Pod(testCase.catalogSource, "hello", "busybox", "", map[string]string{}, testCase.annotations, int32(0), int32(0)) + pod := Pod(testCase.catalogSource, "hello", "busybox", "", map[string]string{}, testCase.annotations, int32(0), int32(0), int64(workloadUserID)) require.Equal(t, testCase.expectedNodeSelectors, pod.Spec.NodeSelector) require.Equal(t, testCase.expectedPriorityClassName, pod.Spec.PriorityClassName) require.Equal(t, testCase.expectedTolerations, pod.Spec.Tolerations) diff --git a/staging/operator-lifecycle-manager/test/e2e/operator_groups_e2e_test.go b/staging/operator-lifecycle-manager/test/e2e/operator_groups_e2e_test.go index 413374eb8f..553db457ea 100644 --- a/staging/operator-lifecycle-manager/test/e2e/operator_groups_e2e_test.go +++ b/staging/operator-lifecycle-manager/test/e2e/operator_groups_e2e_test.go @@ -1799,8 +1799,7 @@ var _ = Describe("Operator Group", func() { // Versions of OLM at 0.14.1 and older had a bug that would place the wrong namespace annotation on copied CSVs, // preventing them from being GCd. This ensures that any leftover CSVs in that state are properly cleared up. 
- // issue: https://github.com/operator-framework/operator-lifecycle-manager/issues/2644 - It("[FLAKE] cleanup csvs with bad owner operator groups", func() { + It("cleanup csvs with bad namespace annotation", func() { csvName := genName("another-csv-") // must be lowercase for DNS-1123 validation @@ -2013,18 +2012,28 @@ var _ = Describe("Operator Group", func() { return false, nil }) require.NoError(GinkgoT(), err) - - // Give copied CSV a bad operatorgroup annotation - updateCSV := func() error { - fetchedCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{}) - require.NoError(GinkgoT(), err) + GinkgoT().Log("Copied CSV showed up in other namespace, giving copied CSV a bad OperatorGroup annotation") + err = wait.Poll(pollInterval, pollDuration, func() (bool, error) { + fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{}) + if fetchErr != nil { + return false, fetchErr + } fetchedCSV.Annotations[v1.OperatorGroupNamespaceAnnotationKey] = fetchedCSV.GetNamespace() - _, err = crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Update(context.TODO(), fetchedCSV, metav1.UpdateOptions{}) - return err - } - require.NoError(GinkgoT(), retry.RetryOnConflict(retry.DefaultBackoff, updateCSV)) - - // wait for CSV to be gc'd + _, updateErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Update(context.TODO(), fetchedCSV, metav1.UpdateOptions{}) + if updateErr != nil { + return false, updateErr + } + updatedCSV, updatedfetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{}) + if updatedfetchErr != nil { + return false, updatedfetchErr + } + if updatedCSV.Annotations[v1.OperatorGroupNamespaceAnnotationKey] == fetchedCSV.GetNamespace() { + return true, nil + } + return false, nil + }) + require.NoError(GinkgoT(), 
err) + GinkgoT().Log("Done updating copied CSV with bad OperatorGroup annotation, waiting for CSV to be gc'd") err = wait.Poll(pollInterval, 2*pollDuration, func() (bool, error) { csv, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{}) if fetchErr != nil { @@ -2034,8 +2043,12 @@ var _ = Describe("Operator Group", func() { GinkgoT().Logf("Error (in %v): %v", opGroupNamespace, fetchErr.Error()) return false, fetchErr } - GinkgoT().Logf("%#v", csv.Annotations) - GinkgoT().Logf(csv.GetNamespace()) + // The CSV with the wrong annotation could have been replaced with a new copied CSV by this time + // If we find a CSV in the namespace, and it contains the correct annotation, it means the CSV + // with the wrong annotation was GCed + if csv.Annotations[v1.OperatorGroupNamespaceAnnotationKey] != csv.GetNamespace() { + return true, nil + } return false, nil }) require.NoError(GinkgoT(), err) diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/cmd/catalog/main.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/cmd/catalog/main.go index ead86cd763..20134d71d2 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/cmd/catalog/main.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/cmd/catalog/main.go @@ -30,6 +30,7 @@ const ( defaultOPMImage = "quay.io/operator-framework/upstream-opm-builder:latest" defaultUtilImage = "quay.io/operator-framework/olm:latest" defaultOperatorName = "" + defaultWorkLoadUserID = int64(1001) ) // config flags defined globally so that they appear on the test binary as well @@ -83,6 +84,10 @@ func (o *options) run(ctx context.Context, logger *logrus.Logger) error { return fmt.Errorf("error configuring client: %s", err.Error()) } + workloadUserID := int64(-1) + if o.setWorkloadUserID { + workloadUserID = defaultWorkLoadUserID + } // TODO(tflannag): Use options pattern for catalog operator 
// Create a new instance of the operator. op, err := catalog.NewOperator( @@ -98,6 +103,7 @@ func (o *options) run(ctx context.Context, logger *logrus.Logger) error { k8sscheme.Scheme, o.installPlanTimeout, o.bundleUnpackTimeout, + workloadUserID, ) if err != nil { return fmt.Errorf("error configuring catalog operator: %s", err.Error()) } diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/cmd/catalog/start.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/cmd/catalog/start.go index 45a0953c08..7161131880 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/cmd/catalog/start.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/cmd/catalog/start.go @@ -25,6 +25,7 @@ type options struct { tlsKeyPath string tlsCertPath string clientCAPath string + setWorkloadUserID bool installPlanTimeout time.Duration bundleUnpackTimeout time.Duration @@ -66,6 +67,7 @@ func newRootCmd() *cobra.Command { cmd.Flags().StringVar(&o.opmImage, "opmImage", defaultOPMImage, "the image to use for unpacking bundle content with opm") cmd.Flags().StringVar(&o.utilImage, "util-image", defaultUtilImage, "an image containing custom olm utilities") cmd.Flags().StringVar(&o.writeStatusName, "writeStatusName", defaultOperatorName, "ClusterOperator name in which to write status, set to \"\" to disable.") + cmd.Flags().BoolVar(&o.setWorkloadUserID, "set-workload-user-id", false, "set user ID for all workloads (registry pods/bundle unpack jobs) to default 1001") cmd.Flags().BoolVar(&o.debug, "debug", false, "use debug log level") cmd.Flags().BoolVar(&o.version, "version", false, "displays the olm version") diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go index 55665aada6..bf797ba1fd 100644 --- 
a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/bundle/bundle_unpacker.go @@ -22,6 +22,7 @@ import ( listersbatchv1 "k8s.io/client-go/listers/batch/v1" listerscorev1 "k8s.io/client-go/listers/core/v1" listersrbacv1 "k8s.io/client-go/listers/rbac/v1" + "k8s.io/utils/pointer" "github.com/operator-framework/api/pkg/operators/reference" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" @@ -101,6 +102,11 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string // See: https://kubernetes.io/docs/concepts/workloads/controllers/job/#pod-backoff-failure-policy RestartPolicy: corev1.RestartPolicyNever, ImagePullSecrets: secrets, + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -129,6 +135,12 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -148,6 +160,12 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -170,6 +188,12 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: 
pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -193,7 +217,10 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string job.SetNamespace(cmRef.Namespace) job.SetName(cmRef.Name) job.SetOwnerReferences([]metav1.OwnerReference{ownerRef(cmRef)}) - + if c.runAsUser > 0 { + job.Spec.Template.Spec.SecurityContext.RunAsUser = &c.runAsUser + job.Spec.Template.Spec.SecurityContext.RunAsNonRoot = pointer.Bool(true) + } // By default the BackoffLimit is set to 6 which with exponential backoff 10s + 20s + 40s ... // translates to ~10m of waiting time. // We want to fail faster than that when we have repeated failures from the bundle unpack pod @@ -246,6 +273,7 @@ type ConfigMapUnpacker struct { loader *configmap.BundleLoader now func() metav1.Time unpackTimeout time.Duration + runAsUser int64 } type ConfigMapUnpackerOption func(*ConfigMapUnpacker) @@ -335,6 +363,12 @@ func WithNow(now func() metav1.Time) ConfigMapUnpackerOption { } } +func WithUserID(id int64) ConfigMapUnpackerOption { + return func(unpacker *ConfigMapUnpacker) { + unpacker.runAsUser = id + } +} + func (c *ConfigMapUnpacker) apply(options ...ConfigMapUnpackerOption) { for _, option := range options { option(c) diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go index 658cd93a34..536d51bece 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog/operator.go @@ -125,7 +125,7 @@ type Operator struct { type CatalogSourceSyncFunc func(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) // 
NewOperator creates a new Catalog Operator. -func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clock, logger *logrus.Logger, resync time.Duration, configmapRegistryImage, opmImage, utilImage string, operatorNamespace string, scheme *runtime.Scheme, installPlanTimeout time.Duration, bundleUnpackTimeout time.Duration) (*Operator, error) { +func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clock, logger *logrus.Logger, resync time.Duration, configmapRegistryImage, opmImage, utilImage string, operatorNamespace string, scheme *runtime.Scheme, installPlanTimeout time.Duration, bundleUnpackTimeout time.Duration, workloadUserID int64) (*Operator, error) { resyncPeriod := queueinformer.ResyncWithJitter(resync, 0.2) config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) if err != nil { @@ -194,7 +194,7 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo op.sources = grpc.NewSourceStore(logger, 10*time.Second, 10*time.Minute, op.syncSourceState) op.sourceInvalidator = resolver.SourceProviderFromRegistryClientProvider(op.sources, logger) resolverSourceProvider := NewOperatorGroupToggleSourceProvider(op.sourceInvalidator, logger, op.lister.OperatorsV1().OperatorGroupLister()) - op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, opClient, configmapRegistryImage, op.now, ssaClient) + op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, opClient, configmapRegistryImage, op.now, ssaClient, workloadUserID) res := resolver.NewOperatorStepResolver(lister, crClient, operatorNamespace, resolverSourceProvider, logger) op.resolver = resolver.NewInstrumentedResolver(res, metrics.RegisterDependencyResolutionSuccess, metrics.RegisterDependencyResolutionFailure) @@ -433,6 +433,7 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo bundle.WithUtilImage(utilImage), bundle.WithNow(op.now), bundle.WithUnpackTimeout(op.bundleUnpackTimeout), + 
bundle.WithUserID(workloadUserID), ) if err != nil { return nil, err @@ -1404,13 +1405,6 @@ type UnpackedBundleReference struct { Properties string `json:"properties"` } -/* unpackBundles makes one walk through the bundlelookups and attempts to progress them -Returns: - unpacked: bool - If the bundle was successfully unpacked - out: *v1alpha1.InstallPlan - the resulting installPlan - error: error -*/ - func (o *Operator) unpackBundles(plan *v1alpha1.InstallPlan) (bool, *v1alpha1.InstallPlan, error) { out := plan.DeepCopy() unpacked := true diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap.go index 0f8518d4c5..f264d1bee3 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/configmap.go @@ -23,6 +23,7 @@ import ( // configMapCatalogSourceDecorator wraps CatalogSource to add additional methods type configMapCatalogSourceDecorator struct { *v1alpha1.CatalogSource + runAsUser int64 } const ( @@ -101,7 +102,7 @@ func (s *configMapCatalogSourceDecorator) Service() *corev1.Service { } func (s *configMapCatalogSourceDecorator) Pod(image string) *corev1.Pod { - pod := Pod(s.CatalogSource, "configmap-registry-server", image, "", s.Labels(), s.Annotations(), 5, 5) + pod := Pod(s.CatalogSource, "configmap-registry-server", image, "", s.Labels(), s.Annotations(), 5, 5, s.runAsUser) pod.Spec.ServiceAccountName = s.GetName() + ConfigMapServerPostfix pod.Spec.Containers[0].Command = []string{"configmap-server", "-c", s.Spec.ConfigMap, "-n", s.GetNamespace()} ownerutil.AddOwner(pod, s.CatalogSource, false, false) @@ -162,10 +163,11 @@ func (s *configMapCatalogSourceDecorator) RoleBinding() *rbacv1.RoleBinding { } type 
ConfigMapRegistryReconciler struct { - now nowFunc - Lister operatorlister.OperatorLister - OpClient operatorclient.ClientInterface - Image string + now nowFunc + Lister operatorlister.OperatorLister + OpClient operatorclient.ClientInterface + Image string + createPodAsUser int64 } var _ RegistryEnsurer = &ConfigMapRegistryReconciler{} @@ -240,7 +242,7 @@ func (c *ConfigMapRegistryReconciler) currentPodsWithCorrectResourceVersion(sour // EnsureRegistryServer ensures that all components of registry server are up to date. func (c *ConfigMapRegistryReconciler) EnsureRegistryServer(catalogSource *v1alpha1.CatalogSource) error { - source := configMapCatalogSourceDecorator{catalogSource} + source := configMapCatalogSourceDecorator{catalogSource, c.createPodAsUser} image := c.Image if source.Spec.SourceType == "grpc" { @@ -389,7 +391,7 @@ func (c *ConfigMapRegistryReconciler) ensureService(source configMapCatalogSourc // CheckRegistryServer returns true if the given CatalogSource is considered healthy; false otherwise. 
func (c *ConfigMapRegistryReconciler) CheckRegistryServer(catalogSource *v1alpha1.CatalogSource) (healthy bool, err error) { - source := configMapCatalogSourceDecorator{catalogSource} + source := configMapCatalogSourceDecorator{catalogSource, c.createPodAsUser} image := c.Image if source.Spec.SourceType == "grpc" { diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc.go index 4f63cc87aa..93e28e6286 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/grpc.go @@ -32,6 +32,7 @@ const ( // grpcCatalogSourceDecorator wraps CatalogSource to add additional methods type grpcCatalogSourceDecorator struct { *v1alpha1.CatalogSource + createPodAsUser int64 } type UpdateNotReadyErr struct { @@ -122,16 +123,17 @@ func (s *grpcCatalogSourceDecorator) ServiceAccount() *corev1.ServiceAccount { } func (s *grpcCatalogSourceDecorator) Pod(saName string) *corev1.Pod { - pod := Pod(s.CatalogSource, "registry-server", s.Spec.Image, saName, s.Labels(), s.Annotations(), 5, 10) + pod := Pod(s.CatalogSource, "registry-server", s.Spec.Image, saName, s.Labels(), s.Annotations(), 5, 10, s.createPodAsUser) ownerutil.AddOwner(pod, s.CatalogSource, false, false) return pod } type GrpcRegistryReconciler struct { - now nowFunc - Lister operatorlister.OperatorLister - OpClient operatorclient.ClientInterface - SSAClient *controllerclient.ServerSideApplier + now nowFunc + Lister operatorlister.OperatorLister + OpClient operatorclient.ClientInterface + SSAClient *controllerclient.ServerSideApplier + createPodAsUser int64 } var _ RegistryReconciler = &GrpcRegistryReconciler{} @@ -198,7 +200,7 @@ func (c *GrpcRegistryReconciler) 
currentPodsWithCorrectImageAndSpec(source grpcC // EnsureRegistryServer ensures that all components of registry server are up to date. func (c *GrpcRegistryReconciler) EnsureRegistryServer(catalogSource *v1alpha1.CatalogSource) error { - source := grpcCatalogSourceDecorator{catalogSource} + source := grpcCatalogSourceDecorator{catalogSource, c.createPodAsUser} // if service status is nil, we force create every object to ensure they're created the first time overwrite := source.Status.RegistryServiceStatus == nil || !isRegistryServiceStatusValid(&source) @@ -447,7 +449,7 @@ func (c *GrpcRegistryReconciler) removePods(pods []*corev1.Pod, namespace string // CheckRegistryServer returns true if the given CatalogSource is considered healthy; false otherwise. func (c *GrpcRegistryReconciler) CheckRegistryServer(catalogSource *v1alpha1.CatalogSource) (healthy bool, err error) { - source := grpcCatalogSourceDecorator{catalogSource} + source := grpcCatalogSourceDecorator{catalogSource, c.createPodAsUser} // Check on registry resources // TODO: add gRPC health check if len(c.currentPodsWithCorrectImageAndSpec(source, source.ServiceAccount().GetName())) < 1 || diff --git a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler.go b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler.go index 6089d2e777..45bf8dcba4 100644 --- a/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler.go +++ b/vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/reconciler/reconciler.go @@ -10,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/utils/pointer" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" controllerclient 
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client" @@ -61,6 +62,7 @@ type registryReconcilerFactory struct { OpClient operatorclient.ClientInterface ConfigMapServerImage string SSAClient *controllerclient.ServerSideApplier + createPodAsUser int64 } // ReconcilerForSource returns a RegistryReconciler based on the configuration of the given CatalogSource. @@ -69,18 +71,20 @@ func (r *registryReconcilerFactory) ReconcilerForSource(source *operatorsv1alpha switch source.Spec.SourceType { case operatorsv1alpha1.SourceTypeInternal, operatorsv1alpha1.SourceTypeConfigmap: return &ConfigMapRegistryReconciler{ - now: r.now, - Lister: r.Lister, - OpClient: r.OpClient, - Image: r.ConfigMapServerImage, + now: r.now, + Lister: r.Lister, + OpClient: r.OpClient, + Image: r.ConfigMapServerImage, + createPodAsUser: r.createPodAsUser, } case operatorsv1alpha1.SourceTypeGrpc: if source.Spec.Image != "" { return &GrpcRegistryReconciler{ - now: r.now, - Lister: r.Lister, - OpClient: r.OpClient, - SSAClient: r.SSAClient, + now: r.now, + Lister: r.Lister, + OpClient: r.OpClient, + SSAClient: r.SSAClient, + createPodAsUser: r.createPodAsUser, } } else if source.Spec.Address != "" { return &GrpcAddressRegistryReconciler{ @@ -92,17 +96,18 @@ func (r *registryReconcilerFactory) ReconcilerForSource(source *operatorsv1alpha } // NewRegistryReconcilerFactory returns an initialized RegistryReconcilerFactory. 
-func NewRegistryReconcilerFactory(lister operatorlister.OperatorLister, opClient operatorclient.ClientInterface, configMapServerImage string, now nowFunc, ssaClient *controllerclient.ServerSideApplier) RegistryReconcilerFactory { +func NewRegistryReconcilerFactory(lister operatorlister.OperatorLister, opClient operatorclient.ClientInterface, configMapServerImage string, now nowFunc, ssaClient *controllerclient.ServerSideApplier, createPodAsUser int64) RegistryReconcilerFactory { return ®istryReconcilerFactory{ now: now, Lister: lister, OpClient: opClient, ConfigMapServerImage: configMapServerImage, SSAClient: ssaClient, + createPodAsUser: createPodAsUser, } } -func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saName string, labels map[string]string, annotations map[string]string, readinessDelay int32, livenessDelay int32) *corev1.Pod { +func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saName string, labels map[string]string, annotations map[string]string, readinessDelay int32, livenessDelay int32, runAsUser int64) *corev1.Pod { // Ensure the catalog image is always pulled if the image is not based on a digest, measured by whether an "@" is included. // See https://github.com/docker/distribution/blob/master/reference/reference.go for more info. // This means recreating non-digest based catalog pods will result in the latest version of the catalog content being delivered on-cluster. 
@@ -125,8 +130,6 @@ func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saN podAnnotations[key] = value } - readOnlyRootFilesystem := false - pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: source.GetName() + "-", @@ -179,7 +182,11 @@ func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saN }, }, SecurityContext: &corev1.SecurityContext{ - ReadOnlyRootFilesystem: &readOnlyRootFilesystem, + ReadOnlyRootFilesystem: pointer.Bool(false), + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, }, ImagePullPolicy: pullPolicy, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, @@ -188,10 +195,19 @@ func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saN NodeSelector: map[string]string{ "kubernetes.io/os": "linux", }, + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, ServiceAccountName: saName, }, } + if runAsUser > 0 { + pod.Spec.SecurityContext.RunAsUser = &runAsUser + pod.Spec.SecurityContext.RunAsNonRoot = pointer.Bool(true) + } // Override scheduling options if specified if source.Spec.GrpcPodConfig != nil { grpcPodConfig := source.Spec.GrpcPodConfig From 406b2dcf6b0185cf828a7bc640006088bb537320 Mon Sep 17 00:00:00 2001 From: Anik Bhattacharjee Date: Mon, 8 Aug 2022 12:27:25 -0400 Subject: [PATCH 2/4] (manifests): populate pod security configs dynamically (#2831) This PR: * introduces a chart value that decides whether the --set-workload-user-id flag is set to true or false for the catalog-operator container * introduces chart values to fill in the psa enforce level/version for the namespaces Closes #2827 Signed-off-by: Anik Bhattacharjee Upstream-commit: f982e2fbeecfcb917665ed760363326e313b2967 Upstream-repository: operator-lifecycle-manager --- .../chart/templates/0000_50_olm_00-namespace.yaml | 12
++++++++---- .../0000_50_olm_08-catalog-operator.deployment.yaml | 7 +++++-- .../deploy/chart/values.yaml | 9 +++++++++ .../deploy/upstream/values.yaml | 9 +++++++++ 4 files changed, 31 insertions(+), 6 deletions(-) diff --git a/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_00-namespace.yaml b/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_00-namespace.yaml index 6575fb2ea4..1ee5727899 100644 --- a/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_00-namespace.yaml +++ b/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_00-namespace.yaml @@ -3,8 +3,10 @@ kind: Namespace metadata: name: {{ .Values.namespace }} labels: - pod-security.kubernetes.io/enforce: restricted - pod-security.kubernetes.io/enforce-version: latest + {{- if .Values.namespace_psa }} + pod-security.kubernetes.io/enforce: {{ .Values.namespace_psa.enforceLevel }} + pod-security.kubernetes.io/enforce-version: {{ .Values.namespace_psa.enforceVersion }} + {{- end }} --- apiVersion: v1 @@ -12,5 +14,7 @@ kind: Namespace metadata: name: {{ .Values.operator_namespace }} labels: - pod-security.kubernetes.io/enforce: baseline - pod-security.kubernetes.io/enforce-version: latest + {{- if .Values.operator_namespace_psa }} + pod-security.kubernetes.io/enforce: {{ .Values.operator_namespace_psa.enforceLevel }} + pod-security.kubernetes.io/enforce-version: {{ .Values.operator_namespace_psa.enforceVersion }} + {{- end }} diff --git a/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml b/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml index f3fbe99993..48e882d4ee 100644 --- a/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml +++ b/staging/operator-lifecycle-manager/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml @@ -84,8 +84,11 @@ spec: - 
--client-ca - /profile-collector-cert/tls.crt {{- end }} - - --set-workload-user-id - - "true" + {{- if eq .Values.catalog.setWorkloadUserID true }} + - --set-workload-user-id=true + {{- else }} + - --set-workload-user-id=false + {{ end }} image: {{ .Values.catalog.image.ref }} imagePullPolicy: {{ .Values.catalog.image.pullPolicy }} ports: diff --git a/staging/operator-lifecycle-manager/deploy/chart/values.yaml b/staging/operator-lifecycle-manager/deploy/chart/values.yaml index cf4706edc8..be60b34593 100644 --- a/staging/operator-lifecycle-manager/deploy/chart/values.yaml +++ b/staging/operator-lifecycle-manager/deploy/chart/values.yaml @@ -1,7 +1,15 @@ rbacApiVersion: rbac.authorization.k8s.io namespace: operator-lifecycle-manager +# see https://kubernetes.io/docs/concepts/security/pod-security-admission/ for more details +namespace_psa: + enforceLevel: restricted + enforceVersion: latest catalog_namespace: operator-lifecycle-manager operator_namespace: operators +# see https://kubernetes.io/docs/concepts/security/pod-security-admission/ for more details +operator_namespace_psa: + enforceLevel: baseline + enforceVersion: latest minKubeVersion: 1.11.0 writeStatusName: '""' imagestream: false @@ -25,6 +33,7 @@ olm: memory: 160Mi catalog: + setWorkloadUserID: true replicaCount: 1 commandArgs: --configmapServerImage=quay.io/operator-framework/configmap-operator-registry:latest image: diff --git a/staging/operator-lifecycle-manager/deploy/upstream/values.yaml b/staging/operator-lifecycle-manager/deploy/upstream/values.yaml index ccfa1e0166..dbe19f2296 100644 --- a/staging/operator-lifecycle-manager/deploy/upstream/values.yaml +++ b/staging/operator-lifecycle-manager/deploy/upstream/values.yaml @@ -1,8 +1,16 @@ installType: upstream rbacApiVersion: rbac.authorization.k8s.io namespace: olm +# see https://kubernetes.io/docs/concepts/security/pod-security-admission/ for more details +namespace_psa: + enforceLevel: restricted + enforceVersion: latest catalog_namespace: olm 
operator_namespace: operators +# see https://kubernetes.io/docs/concepts/security/pod-security-admission/ for more details +operator_namespace_psa: + enforceLevel: baseline + enforceVersion: latest imagestream: false writeStatusName: '""' writePackageServerStatusName: "" @@ -14,6 +22,7 @@ olm: service: internalPort: 8080 catalog: + setWorkloadUserID: true replicaCount: 1 image: ref: quay.io/operator-framework/olm@sha256:e74b2ac57963c7f3ba19122a8c31c9f2a0deb3c0c5cac9e5323ccffd0ca198ed From b28c0105f3c710029c1b6eedb77536175aae8cfe Mon Sep 17 00:00:00 2001 From: Anik Bhattacharjee Date: Wed, 10 Aug 2022 15:18:47 -0400 Subject: [PATCH 3/4] (e2e-fix) do not check for updated csv that is likely to be GC'd (#2837) In the test `Operator Group cleanup csvs with bad namespace annotation`, the polling logic that updates a copied csv with a bad annotation was checking to see if the update was successful. However, once the copied csv is given a bad annotation, the CSV is GC'd, and if the collection happens immediately, the polling logic fails. This fix removes the logic that attempts to check the updated CSV, and instead relies on the updateErr being nil to assert that the update was successful. 
Signed-off-by: Anik Bhattacharjee Signed-off-by: Anik Bhattacharjee Upstream-commit: e08415d1228d095d0238dcf7264d3b70464e805b Upstream-repository: operator-lifecycle-manager --- .../test/e2e/operator_groups_e2e_test.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/staging/operator-lifecycle-manager/test/e2e/operator_groups_e2e_test.go b/staging/operator-lifecycle-manager/test/e2e/operator_groups_e2e_test.go index 553db457ea..743f8ff37d 100644 --- a/staging/operator-lifecycle-manager/test/e2e/operator_groups_e2e_test.go +++ b/staging/operator-lifecycle-manager/test/e2e/operator_groups_e2e_test.go @@ -2021,16 +2021,10 @@ var _ = Describe("Operator Group", func() { fetchedCSV.Annotations[v1.OperatorGroupNamespaceAnnotationKey] = fetchedCSV.GetNamespace() _, updateErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Update(context.TODO(), fetchedCSV, metav1.UpdateOptions{}) if updateErr != nil { + GinkgoT().Logf("Error updating copied CSV (in %v): %v", otherNamespaceName, updateErr.Error()) return false, updateErr } - updatedCSV, updatedfetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{}) - if updatedfetchErr != nil { - return false, updatedfetchErr - } - if updatedCSV.Annotations[v1.OperatorGroupNamespaceAnnotationKey] == fetchedCSV.GetNamespace() { - return true, nil - } - return false, nil + return true, nil }) require.NoError(GinkgoT(), err) GinkgoT().Log("Done updating copied CSV with bad annotation OperatorGroup, waiting for CSV to be gc'd") From 462a0c8560ad31cba3798b281ccf1d2e4ea55515 Mon Sep 17 00:00:00 2001 From: Anik Bhattacharjee Date: Wed, 10 Aug 2022 16:30:38 -0400 Subject: [PATCH 4/4] (manifests) configure manifests to comply to PSA restricted profile --- manifests/0000_50_olm_00-namespace.yaml | 12 +++++++----- ...-operator.deployment.ibm-cloud-managed.yaml | 1 - ...0000_50_olm_06-psm-operator.deployment.yaml | 1 - 
...-operator.deployment.ibm-cloud-managed.yaml | 17 ++++++++--------- ...0000_50_olm_07-olm-operator.deployment.yaml | 17 ++++++++--------- ...-operator.deployment.ibm-cloud-managed.yaml | 18 +++++++++--------- ..._50_olm_08-catalog-operator.deployment.yaml | 18 +++++++++--------- pkg/manifests/csv.yaml | 17 ++++++++--------- scripts/catalog-deployment.patch.yaml | 1 - scripts/generate_crds_manifests.sh | 3 ++- scripts/olm-deployment.patch.yaml | 1 - scripts/packageserver-deployment.patch.yaml | 1 - values.yaml | 4 ++++ 13 files changed, 55 insertions(+), 56 deletions(-) diff --git a/manifests/0000_50_olm_00-namespace.yaml b/manifests/0000_50_olm_00-namespace.yaml index fc1de8ca9b..8fffa527e0 100644 --- a/manifests/0000_50_olm_00-namespace.yaml +++ b/manifests/0000_50_olm_00-namespace.yaml @@ -2,23 +2,25 @@ apiVersion: v1 kind: Namespace metadata: name: openshift-operator-lifecycle-manager + labels: + openshift.io/scc: "anyuid" + openshift.io/cluster-monitoring: "true" annotations: openshift.io/node-selector: "" workload.openshift.io/allowed: "management" include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" - labels: - openshift.io/scc: "anyuid" - openshift.io/cluster-monitoring: "true" --- apiVersion: v1 kind: Namespace metadata: name: openshift-operators + labels: + pod-security.kubernetes.io/enforce: baseline + pod-security.kubernetes.io/enforce-version: "v1.24" + openshift.io/scc: "anyuid" annotations: openshift.io/node-selector: "" workload.openshift.io/allowed: "management" include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" - labels: - openshift.io/scc: "anyuid" diff --git a/manifests/0000_50_olm_06-psm-operator.deployment.ibm-cloud-managed.yaml b/manifests/0000_50_olm_06-psm-operator.deployment.ibm-cloud-managed.yaml index 625c91f45c..be34c67446 100644 --- 
a/manifests/0000_50_olm_06-psm-operator.deployment.ibm-cloud-managed.yaml +++ b/manifests/0000_50_olm_06-psm-operator.deployment.ibm-cloud-managed.yaml @@ -23,7 +23,6 @@ spec: spec: securityContext: runAsNonRoot: true - runAsUser: 65534 seccompProfile: type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount diff --git a/manifests/0000_50_olm_06-psm-operator.deployment.yaml b/manifests/0000_50_olm_06-psm-operator.deployment.yaml index b72b7c2331..f70bc614e4 100644 --- a/manifests/0000_50_olm_06-psm-operator.deployment.yaml +++ b/manifests/0000_50_olm_06-psm-operator.deployment.yaml @@ -23,7 +23,6 @@ spec: spec: securityContext: runAsNonRoot: true - runAsUser: 65534 seccompProfile: type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount diff --git a/manifests/0000_50_olm_07-olm-operator.deployment.ibm-cloud-managed.yaml b/manifests/0000_50_olm_07-olm-operator.deployment.ibm-cloud-managed.yaml index bfe0877910..7ba1c92db4 100644 --- a/manifests/0000_50_olm_07-olm-operator.deployment.ibm-cloud-managed.yaml +++ b/manifests/0000_50_olm_07-olm-operator.deployment.ibm-cloud-managed.yaml @@ -21,6 +21,10 @@ spec: annotations: target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount volumes: - name: srv-cert @@ -31,6 +35,10 @@ spec: secretName: pprof-cert containers: - name: olm-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] volumeMounts: - name: srv-cert mountPath: "/srv-cert" @@ -82,10 +90,6 @@ spec: requests: cpu: 10m memory: 160Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] nodeSelector: kubernetes.io/os: linux tolerations: @@ -101,8 +105,3 @@ spec: operator: Exists tolerationSeconds: 120 priorityClassName: system-cluster-critical - securityContext: - runAsNonRoot: true - runAsUser: 65534 - 
seccompProfile: - type: RuntimeDefault diff --git a/manifests/0000_50_olm_07-olm-operator.deployment.yaml b/manifests/0000_50_olm_07-olm-operator.deployment.yaml index c4b5816d0c..3060f73ecd 100644 --- a/manifests/0000_50_olm_07-olm-operator.deployment.yaml +++ b/manifests/0000_50_olm_07-olm-operator.deployment.yaml @@ -21,6 +21,10 @@ spec: annotations: target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount volumes: - name: srv-cert @@ -31,6 +35,10 @@ spec: secretName: pprof-cert containers: - name: olm-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] volumeMounts: - name: srv-cert mountPath: "/srv-cert" @@ -82,10 +90,6 @@ spec: requests: cpu: 10m memory: 160Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] nodeSelector: kubernetes.io/os: linux node-role.kubernetes.io/master: "" @@ -102,8 +106,3 @@ spec: operator: Exists tolerationSeconds: 120 priorityClassName: system-cluster-critical - securityContext: - runAsNonRoot: true - runAsUser: 65534 - seccompProfile: - type: RuntimeDefault diff --git a/manifests/0000_50_olm_08-catalog-operator.deployment.ibm-cloud-managed.yaml b/manifests/0000_50_olm_08-catalog-operator.deployment.ibm-cloud-managed.yaml index 8149feec7e..1df4b9edd1 100644 --- a/manifests/0000_50_olm_08-catalog-operator.deployment.ibm-cloud-managed.yaml +++ b/manifests/0000_50_olm_08-catalog-operator.deployment.ibm-cloud-managed.yaml @@ -21,6 +21,10 @@ spec: annotations: target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount volumes: - name: srv-cert @@ -31,6 +35,10 @@ spec: secretName: pprof-cert containers: - name: catalog-operator + securityContext: + 
allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] volumeMounts: - name: srv-cert mountPath: "/srv-cert" @@ -55,6 +63,7 @@ spec: - /srv-cert/tls.key - --client-ca - /profile-collector-cert/tls.crt + - --set-workload-user-id=false image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 imagePullPolicy: IfNotPresent ports: @@ -78,10 +87,6 @@ spec: env: - name: RELEASE_VERSION value: "0.0.1-snapshot" - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] nodeSelector: kubernetes.io/os: linux tolerations: @@ -97,8 +102,3 @@ spec: operator: Exists tolerationSeconds: 120 priorityClassName: system-cluster-critical - securityContext: - runAsNonRoot: true - runAsUser: 65534 - seccompProfile: - type: RuntimeDefault diff --git a/manifests/0000_50_olm_08-catalog-operator.deployment.yaml b/manifests/0000_50_olm_08-catalog-operator.deployment.yaml index c44901b064..44ab309207 100644 --- a/manifests/0000_50_olm_08-catalog-operator.deployment.yaml +++ b/manifests/0000_50_olm_08-catalog-operator.deployment.yaml @@ -21,6 +21,10 @@ spec: annotations: target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount volumes: - name: srv-cert @@ -31,6 +35,10 @@ spec: secretName: pprof-cert containers: - name: catalog-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] volumeMounts: - name: srv-cert mountPath: "/srv-cert" @@ -55,6 +63,7 @@ spec: - /srv-cert/tls.key - --client-ca - /profile-collector-cert/tls.crt + - --set-workload-user-id=false image: quay.io/operator-framework/olm@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607 imagePullPolicy: IfNotPresent ports: @@ -78,10 +87,6 @@ spec: env: - name: RELEASE_VERSION value: "0.0.1-snapshot" - securityContext: - 
allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] nodeSelector: kubernetes.io/os: linux node-role.kubernetes.io/master: "" @@ -98,8 +103,3 @@ spec: operator: Exists tolerationSeconds: 120 priorityClassName: system-cluster-critical - securityContext: - runAsNonRoot: true - runAsUser: 65534 - seccompProfile: - type: RuntimeDefault diff --git a/pkg/manifests/csv.yaml b/pkg/manifests/csv.yaml index 6b0c595719..897fbeecef 100644 --- a/pkg/manifests/csv.yaml +++ b/pkg/manifests/csv.yaml @@ -88,6 +88,10 @@ spec: target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' creationTimestamp: null spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount nodeSelector: kubernetes.io/os: linux @@ -106,6 +110,10 @@ spec: tolerationSeconds: 120 containers: - name: packageserver + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] command: - /bin/package-server - -v=4 @@ -136,10 +144,6 @@ spec: volumeMounts: - name: tmpfs mountPath: /tmp - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: ["ALL"] volumes: - name: tmpfs emptyDir: {} @@ -154,11 +158,6 @@ spec: values: - packageserver topologyKey: "kubernetes.io/hostname" - securityContext: - runAsNonRoot: true - runAsUser: 65534 - seccompProfile: - type: RuntimeDefault maturity: alpha version: 0.19.0 apiservicedefinitions: diff --git a/scripts/catalog-deployment.patch.yaml b/scripts/catalog-deployment.patch.yaml index de1abc8eaa..a7b1d63237 100644 --- a/scripts/catalog-deployment.patch.yaml +++ b/scripts/catalog-deployment.patch.yaml @@ -19,6 +19,5 @@ path: spec.template.spec.securityContext value: runAsNonRoot: true - runAsUser: 65534 seccompProfile: type: RuntimeDefault diff --git a/scripts/generate_crds_manifests.sh b/scripts/generate_crds_manifests.sh index 09c60e5896..e68c8b337b 100755 --- a/scripts/generate_crds_manifests.sh +++ 
b/scripts/generate_crds_manifests.sh @@ -133,7 +133,6 @@ spec: spec: securityContext: runAsNonRoot: true - runAsUser: 65534 seccompProfile: type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount @@ -367,3 +366,5 @@ add_ibm_managed_cloud_annotations "${ROOT_DIR}/manifests" find "${ROOT_DIR}/manifests" -type f -exec $SED -i "/^#/d" {} \; find "${ROOT_DIR}/manifests" -type f -exec $SED -i "1{/---/d}" {} \; + +${YQ} delete --inplace -d'0' manifests/0000_50_olm_00-namespace.yaml 'metadata.labels."pod-security.kubernetes.io/enforce*"' \ No newline at end of file diff --git a/scripts/olm-deployment.patch.yaml b/scripts/olm-deployment.patch.yaml index de1abc8eaa..a7b1d63237 100644 --- a/scripts/olm-deployment.patch.yaml +++ b/scripts/olm-deployment.patch.yaml @@ -19,6 +19,5 @@ path: spec.template.spec.securityContext value: runAsNonRoot: true - runAsUser: 65534 seccompProfile: type: RuntimeDefault diff --git a/scripts/packageserver-deployment.patch.yaml b/scripts/packageserver-deployment.patch.yaml index 2cbcafb037..f5d0eb2068 100644 --- a/scripts/packageserver-deployment.patch.yaml +++ b/scripts/packageserver-deployment.patch.yaml @@ -43,6 +43,5 @@ path: spec.install.spec.deployments[0].spec.template.spec.securityContext value: runAsNonRoot: true - runAsUser: 65534 seccompProfile: type: RuntimeDefault diff --git a/values.yaml b/values.yaml index 701fe3683f..671d83dc12 100644 --- a/values.yaml +++ b/values.yaml @@ -3,6 +3,9 @@ rbacApiVersion: rbac.authorization.k8s.io namespace: openshift-operator-lifecycle-manager catalog_namespace: openshift-marketplace operator_namespace: openshift-operators +operator_namespace_psa: + enforceLevel: baseline + enforceVersion: '"v1.24"' imagestream: true writeStatusName: operator-lifecycle-manager writeStatusNameCatalog: operator-lifecycle-manager-catalog @@ -37,6 +40,7 @@ olm: cpu: 10m memory: 160Mi catalog: + setWorkloadUserID: false replicaCount: 1 opmImageArgs: 
--opmImage=quay.io/operator-framework/configmap-operator-registry:latest image: