diff --git a/.golangci.yaml b/.golangci.yaml index 75ac3b0fe9..1008151884 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -33,4 +33,13 @@ linters: - unparam - usestdlibvars - wastedassign - - whitespace \ No newline at end of file + - whitespace + +linters-settings: + ireturn: + allow: + - anon + - error + - empty + - stdlib + - generic \ No newline at end of file diff --git a/assets/components/openshift-router/deployment.yaml b/assets/components/openshift-router/deployment.yaml index d9506d3c2f..ae0279652c 100644 --- a/assets/components/openshift-router/deployment.yaml +++ b/assets/components/openshift-router/deployment.yaml @@ -116,7 +116,6 @@ spec: optional: false defaultMode: 420 restartPolicy: Always - terminationGracePeriodSeconds: 3600 dnsPolicy: ClusterFirst nodeSelector: kubernetes.io/os: linux diff --git a/cmd/generate-config/config/config-openapi-spec.json b/cmd/generate-config/config/config-openapi-spec.json index 74b31c1663..d9b714befe 100755 --- a/cmd/generate-config/config/config-openapi-spec.json +++ b/cmd/generate-config/config/config-openapi-spec.json @@ -73,7 +73,8 @@ "ingress": { "type": "object", "required": [ - "routeAdmissionPolicy" + "routeAdmissionPolicy", + "status" ], "properties": { "routeAdmissionPolicy": { @@ -88,6 +89,11 @@ "default": "InterNamespaceAllowed" } } + }, + "status": { + "description": "Default router status, can be Managed or Removed.", + "type": "string", + "default": "Managed" } } }, diff --git a/docs/user/howto_config.md b/docs/user/howto_config.md index 97e0f3566a..7c7a50c805 100644 --- a/docs/user/howto_config.md +++ b/docs/user/howto_config.md @@ -20,6 +20,7 @@ etcd: ingress: routeAdmissionPolicy: namespaceOwnership: "" + status: "" manifests: kustomizePaths: - "" @@ -59,6 +60,7 @@ etcd: ingress: routeAdmissionPolicy: namespaceOwnership: InterNamespaceAllowed + status: Managed manifests: kustomizePaths: - /usr/lib/microshift/manifests diff --git a/packaging/microshift/config.yaml b/packaging/microshift/config.yaml index d46a341453..9c81a783a3 100644 --- a/packaging/microshift/config.yaml +++ b/packaging/microshift/config.yaml @@ -25,6 +25,8 @@ ingress: # - InterNamespaceAllowed: Allow routes to claim different paths of the same host name across namespaces. # If empty, the default is InterNamespaceAllowed. namespaceOwnership: InterNamespaceAllowed + # Default router status, can be Managed or Removed. + status: Managed manifests: # The locations on the filesystem to scan for kustomization files to use to load manifests. Set to a list of paths to scan only those paths. Set to an empty list to disable loading manifests. The entries in the list can be glob patterns to match multiple subdirectories. 
kustomizePaths: diff --git a/pkg/assets/admission-registration.go b/pkg/assets/admission-registration.go index 68657bf687..a1fe262f7c 100644 --- a/pkg/assets/admission-registration.go +++ b/pkg/assets/admission-registration.go @@ -22,7 +22,7 @@ type validationWebhookCfg struct { codec serializer.CodecFactory } -func (v *validationWebhookCfg) Reader(objBytes []byte, renderFunc RenderFunc, params RenderParams) { +func (v *validationWebhookCfg) Read(objBytes []byte, renderFunc RenderFunc, params RenderParams) { var err error if renderFunc != nil { objBytes, err = renderFunc(objBytes, RenderParams{}) @@ -37,7 +37,7 @@ func (v *validationWebhookCfg) Reader(objBytes []byte, renderFunc RenderFunc, pa v.validationWebhookConfig = obj.(*arV1.ValidatingWebhookConfiguration) } -func (v *validationWebhookCfg) Applier(ctx context.Context) error { +func (v *validationWebhookCfg) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyValidatingWebhookConfigurationImproved(ctx, v.Client, assetsEventRecorder, v.validationWebhookConfig, resourceapply.NewResourceCache()) return err } @@ -50,7 +50,7 @@ func admissionRegistrationClient(kubeconfigPath string) *arClientV1.Admissionreg return arClientV1.NewForConfigOrDie(rest.AddUserAgent(restConfig, "admission-registration")) } -func applyAdmissionRegistration(ctx context.Context, admissionRegistrations []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applyAdmissionRegistration(ctx context.Context, admissionRegistrations []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -60,8 +60,8 @@ func applyAdmissionRegistration(ctx context.Context, admissionRegistrations []st if err != nil { return fmt.Errorf("error getting embedded asset %s: %w", ar, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("failed to apply admissionRegistration object: %s, %v", ar, err) return err } diff --git a/pkg/assets/apps.go b/pkg/assets/apps.go index 37826a8b80..df01851da6 100644 --- a/pkg/assets/apps.go +++ b/pkg/assets/apps.go @@ -42,7 +42,7 @@ type dpApplier struct { dp *appsv1.Deployment } -func (d *dpApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (d *dpApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -57,7 +57,7 @@ func (d *dpApplier) Reader(objBytes []byte, render RenderFunc, params RenderPara d.dp = obj.(*appsv1.Deployment) } -func (d *dpApplier) Applier(ctx context.Context) error { +func (d *dpApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyDeployment(ctx, d.Client, assetsEventRecorder, d.dp, 0) return err } @@ -67,7 +67,7 @@ type dsApplier struct { ds *appsv1.DaemonSet } -func (d *dsApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (d *dsApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -81,12 +81,12 @@ func (d *dsApplier) Reader(objBytes []byte, render RenderFunc, params RenderPara } d.ds = obj.(*appsv1.DaemonSet) } -func (d *dsApplier) Applier(ctx context.Context) error { +func (d *dsApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyDaemonSet(ctx, d.Client, assetsEventRecorder, d.ds, 0) return err } 
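// A minimal, self-contained sketch of the pattern this patch introduces: the readerApplier
// interface (Reader/Applier) becomes resourceHandler (Read/Handle), so appliers and the new
// deleters can share the same asset loop. Signatures are simplified for illustration (the real
// Read also takes a RenderFunc and RenderParams, and Handle calls the resourceapply helpers);
// the types below are stand-ins, not the actual MicroShift code.
package main

import (
	"context"
	"fmt"
)

// resourceHandler mirrors the interface in pkg/assets/handler.go.
type resourceHandler interface {
	Read(objBytes []byte)
	Handle(ctx context.Context) error
}

// nsApplier and nsDeleter are toy stand-ins: both decode the same asset in Read,
// but Handle either creates or removes the resource.
type nsApplier struct{ name string }

func (a *nsApplier) Read(objBytes []byte) { a.name = string(objBytes) }

func (a *nsApplier) Handle(ctx context.Context) error {
	fmt.Println("applying namespace", a.name)
	return nil
}

type nsDeleter struct{ name string }

func (d *nsDeleter) Read(objBytes []byte) { d.name = string(objBytes) }

func (d *nsDeleter) Handle(ctx context.Context) error {
	fmt.Println("deleting namespace", d.name)
	return nil
}

// handleAll mirrors the applyApps/handleCore loop: Read each embedded asset,
// then Handle it, stopping on the first error.
func handleAll(ctx context.Context, assets [][]byte, h resourceHandler) error {
	for _, b := range assets {
		h.Read(b)
		if err := h.Handle(ctx); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ctx := context.Background()
	assets := [][]byte{[]byte("openshift-ingress")}
	_ = handleAll(ctx, assets, &nsApplier{}) // Managed: apply the assets
	_ = handleAll(ctx, assets, &nsDeleter{}) // Removed: delete the same assets
}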
-func applyApps(ctx context.Context, apps []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applyApps(ctx context.Context, apps []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -96,8 +96,8 @@ func applyApps(ctx context.Context, apps []string, applier readerApplier, render if err != nil { return fmt.Errorf("error getting asset %s: %v", app, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply apps api %s: %v", app, err) return err } diff --git a/pkg/assets/core.go b/pkg/assets/core.go index 373bee9a24..41b7860703 100644 --- a/pkg/assets/core.go +++ b/pkg/assets/core.go @@ -28,11 +28,6 @@ func init() { } } -type nsApplier struct { - Client *coreclientv1.CoreV1Client - ns *corev1.Namespace -} - func coreClient(kubeconfigPath string) *coreclientv1.CoreV1Client { restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) if err != nil { @@ -42,7 +37,7 @@ func coreClient(kubeconfigPath string) *coreclientv1.CoreV1Client { return coreclientv1.NewForConfigOrDie(rest.AddUserAgent(restConfig, "core-agent")) } -func (ns *nsApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func readCore[T any](objBytes []byte, render RenderFunc, params RenderParams) T { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -54,35 +49,47 @@ func (ns *nsApplier) Reader(objBytes []byte, render RenderFunc, params RenderPar if err != nil { panic(err) } - ns.ns = obj.(*corev1.Namespace) + return obj.(T) } -func (ns *nsApplier) Applier(ctx context.Context) error { +type nsApplier struct { + Client *coreclientv1.CoreV1Client + ns *corev1.Namespace +} + +func (ns *nsApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { + ns.ns = readCore[*corev1.Namespace](objBytes, render, params) +} + +func (ns *nsApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyNamespace(ctx, ns.Client, assetsEventRecorder, ns.ns) return err } +type nsDeleter struct { + Client *coreclientv1.CoreV1Client + ns *corev1.Namespace +} + +func (ns *nsDeleter) Read(objBytes []byte, render RenderFunc, params RenderParams) { + ns.ns = readCore[*corev1.Namespace](objBytes, render, params) +} + +func (ns *nsDeleter) Handle(ctx context.Context) error { + _, _, err := resourceapply.DeleteNamespace(ctx, ns.Client, assetsEventRecorder, ns.ns) + return err +} + type secretApplier struct { Client *coreclientv1.CoreV1Client secret *corev1.Secret } -func (secret *secretApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { - var err error - if render != nil { - objBytes, err = render(objBytes, params) - if err != nil { - panic(err) - } - } - obj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - secret.secret = obj.(*corev1.Secret) +func (secret *secretApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { + secret.secret = readCore[*corev1.Secret](objBytes, render, params) } -func (secret *secretApplier) Applier(ctx context.Context) error { +func (secret *secretApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplySecret(ctx, secret.Client, assetsEventRecorder, secret.secret) return err } @@ -92,22 +99,11 @@ type svcApplier struct { svc *corev1.Service } -func (svc 
*svcApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { - var err error - if render != nil { - objBytes, err = render(objBytes, params) - if err != nil { - panic(err) - } - } - obj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - svc.svc = obj.(*corev1.Service) +func (svc *svcApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { + svc.svc = readCore[*corev1.Service](objBytes, render, params) } -func (svc *svcApplier) Applier(ctx context.Context) error { +func (svc *svcApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyService(ctx, svc.Client, assetsEventRecorder, svc.svc) return err } @@ -117,22 +113,11 @@ type saApplier struct { sa *corev1.ServiceAccount } -func (sa *saApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { - var err error - if render != nil { - objBytes, err = render(objBytes, params) - if err != nil { - panic(err) - } - } - obj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - sa.sa = obj.(*corev1.ServiceAccount) +func (sa *saApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { + sa.sa = readCore[*corev1.ServiceAccount](objBytes, render, params) } -func (sa *saApplier) Applier(ctx context.Context) error { +func (sa *saApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyServiceAccount(ctx, sa.Client, assetsEventRecorder, sa.sa) return err } @@ -142,27 +127,16 @@ type cmApplier struct { cm *corev1.ConfigMap } -func (cm *cmApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { - var err error - if render != nil { - objBytes, err = render(objBytes, params) - if err != nil { - panic(err) - } - } - obj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - cm.cm = obj.(*corev1.ConfigMap) +func (cm *cmApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { + cm.cm = readCore[*corev1.ConfigMap](objBytes, render, params) } -func (cm *cmApplier) Applier(ctx context.Context) error { +func (cm *cmApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyConfigMap(ctx, cm.Client, assetsEventRecorder, cm.cm) return err } -func applyCore(ctx context.Context, cores []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func handleCore(ctx context.Context, cores []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -172,8 +146,8 @@ func applyCore(ctx context.Context, cores []string, applier readerApplier, rende if err != nil { return fmt.Errorf("error getting asset %s: %v", core, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply corev1 api %s: %v", core, err) return err } @@ -185,25 +159,31 @@ func applyCore(ctx context.Context, cores []string, applier readerApplier, rende func ApplyNamespaces(ctx context.Context, cores []string, kubeconfigPath string) error { ns := &nsApplier{} ns.Client = coreClient(kubeconfigPath) - return applyCore(ctx, cores, ns, nil, nil) + return handleCore(ctx, cores, ns, nil, nil) +} + +func DeleteNamespaces(ctx context.Context, cores []string, kubeconfigPath string) error { + ns := &nsDeleter{} + 
ns.Client = coreClient(kubeconfigPath) + return handleCore(ctx, cores, ns, nil, nil) } func ApplyServices(ctx context.Context, cores []string, render RenderFunc, params RenderParams, kubeconfigPath string) error { svc := &svcApplier{} svc.Client = coreClient(kubeconfigPath) - return applyCore(ctx, cores, svc, render, params) + return handleCore(ctx, cores, svc, render, params) } func ApplyServiceAccounts(ctx context.Context, cores []string, kubeconfigPath string) error { sa := &saApplier{} sa.Client = coreClient(kubeconfigPath) - return applyCore(ctx, cores, sa, nil, nil) + return handleCore(ctx, cores, sa, nil, nil) } func ApplyConfigMaps(ctx context.Context, cores []string, render RenderFunc, params RenderParams, kubeconfigPath string) error { cm := &cmApplier{} cm.Client = coreClient(kubeconfigPath) - return applyCore(ctx, cores, cm, render, params) + return handleCore(ctx, cores, cm, render, params) } func ApplyConfigMapWithData(ctx context.Context, cmPath string, data map[string]string, kubeconfigPath string) error { @@ -213,7 +193,7 @@ func ApplyConfigMapWithData(ctx context.Context, cmPath string, data map[string] if err != nil { return err } - cm.Reader(cmBytes, nil, nil) + cm.Read(cmBytes, nil, nil) cm.cm.Data = data _, _, err = resourceapply.ApplyConfigMap(ctx, cm.Client, assetsEventRecorder, cm.cm) return err @@ -226,7 +206,7 @@ func ApplySecretWithData(ctx context.Context, secretPath string, data map[string if err != nil { return err } - secret.Reader(secretBytes, nil, nil) + secret.Read(secretBytes, nil, nil) secret.secret.Data = data _, _, err = resourceapply.ApplySecret(ctx, secret.Client, assetsEventRecorder, secret.secret) return err diff --git a/pkg/assets/applier.go b/pkg/assets/handler.go similarity index 75% rename from pkg/assets/applier.go rename to pkg/assets/handler.go index b5f40d8c71..b1bdff61c1 100644 --- a/pkg/assets/applier.go +++ b/pkg/assets/handler.go @@ -17,7 +17,7 @@ type RenderParams map[string]interface{} type RenderFunc func([]byte, RenderParams) ([]byte, error) -type readerApplier interface { - Reader([]byte, RenderFunc, RenderParams) - Applier(ctx context.Context) error +type resourceHandler interface { + Read([]byte, RenderFunc, RenderParams) + Handle(ctx context.Context) error } diff --git a/pkg/assets/rbac.go b/pkg/assets/rbac.go index 50f6e5d1d1..ae83fbe77e 100644 --- a/pkg/assets/rbac.go +++ b/pkg/assets/rbac.go @@ -42,7 +42,7 @@ func (crb *clusterRoleBindingApplier) New(kubeconfigPath string) { crb.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) } -func (crb *clusterRoleBindingApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderParams) { +func (crb *clusterRoleBindingApplier) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) if err != nil { panic(err) @@ -50,11 +50,38 @@ func (crb *clusterRoleBindingApplier) Reader(objBytes []byte, _ RenderFunc, _ Re crb.crb = obj.(*rbacv1.ClusterRoleBinding) } -func (crb *clusterRoleBindingApplier) Applier(ctx context.Context) error { +func (crb *clusterRoleBindingApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyClusterRoleBinding(ctx, crb.client.RbacV1(), assetsEventRecorder, crb.crb) return err } +type clusterRoleBindingDeleter struct { + client *kubernetes.Clientset + crb *rbacv1.ClusterRoleBinding +} + +func (crb *clusterRoleBindingDeleter) New(kubeconfigPath string) { + restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if 
err != nil { + panic(err) + } + + crb.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) +} + +func (crb *clusterRoleBindingDeleter) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { + obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + crb.crb = obj.(*rbacv1.ClusterRoleBinding) +} + +func (crb *clusterRoleBindingDeleter) Handle(ctx context.Context) error { + _, _, err := resourceapply.DeleteClusterRoleBinding(ctx, crb.client.RbacV1(), assetsEventRecorder, crb.crb) + return err +} + type clusterRoleApplier struct { client *kubernetes.Clientset cr *rbacv1.ClusterRole @@ -69,7 +96,7 @@ func (cr *clusterRoleApplier) New(kubeconfigPath string) { cr.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) } -func (cr *clusterRoleApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderParams) { +func (cr *clusterRoleApplier) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) if err != nil { panic(err) @@ -77,7 +104,7 @@ func (cr *clusterRoleApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderPara cr.cr = obj.(*rbacv1.ClusterRole) } -func (cr *clusterRoleApplier) Applier(ctx context.Context) error { +func (cr *clusterRoleApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyClusterRole(ctx, cr.client.RbacV1(), assetsEventRecorder, cr.cr) return err } @@ -96,7 +123,7 @@ func (rb *roleBindingApplier) New(kubeconfigPath string) { rb.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) } -func (rb *roleBindingApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderParams) { +func (rb *roleBindingApplier) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) if err != nil { panic(err) @@ -104,11 +131,38 @@ func (rb *roleBindingApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderPara rb.rb = obj.(*rbacv1.RoleBinding) } -func (rb *roleBindingApplier) Applier(ctx context.Context) error { +func (rb *roleBindingApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyRoleBinding(ctx, rb.client.RbacV1(), assetsEventRecorder, rb.rb) return err } +type clusterRoleDeleter struct { + client *kubernetes.Clientset + cr *rbacv1.ClusterRole +} + +func (cr *clusterRoleDeleter) New(kubeconfigPath string) { + restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + panic(err) + } + + cr.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) +} + +func (cr *clusterRoleDeleter) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { + obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + cr.cr = obj.(*rbacv1.ClusterRole) +} + +func (cr *clusterRoleDeleter) Handle(ctx context.Context) error { + _, _, err := resourceapply.DeleteClusterRole(ctx, cr.client.RbacV1(), assetsEventRecorder, cr.cr) + return err +} + type roleApplier struct { client *kubernetes.Clientset r *rbacv1.Role @@ -123,7 +177,7 @@ func (r *roleApplier) New(kubeconfigPath string) { r.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) } -func (r *roleApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderParams) { +func (r *roleApplier) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { obj, 
err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) if err != nil { panic(err) @@ -131,24 +185,24 @@ func (r *roleApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderParams) { r.r = obj.(*rbacv1.Role) } -func (r *roleApplier) Applier(ctx context.Context) error { +func (r *roleApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyRole(ctx, r.client.RbacV1(), assetsEventRecorder, r.r) return err } -func applyRbac(ctx context.Context, rbacs []string, applier readerApplier) error { +func handleRbac(ctx context.Context, rbacs []string, handler resourceHandler) error { lock.Lock() defer lock.Unlock() for _, rbac := range rbacs { - klog.Infof("Applying rbac %s", rbac) + klog.Infof("Handling rbac %s", rbac) objBytes, err := embedded.Asset(rbac) if err != nil { return fmt.Errorf("error getting asset %s: %v", rbac, err) } - applier.Reader(objBytes, nil, nil) - if err := applier.Applier(ctx); err != nil { - klog.Warningf("Failed to apply rbac %s: %v", rbac, err) + handler.Read(objBytes, nil, nil) + if err := handler.Handle(ctx); err != nil { + klog.Warningf("Failed to handle rbac %s: %v", rbac, err) return err } } @@ -159,22 +213,35 @@ func applyRbac(ctx context.Context, rbacs []string, applier readerApplier) error func ApplyClusterRoleBindings(ctx context.Context, rbacs []string, kubeconfigPath string) error { crb := &clusterRoleBindingApplier{} crb.New(kubeconfigPath) - return applyRbac(ctx, rbacs, crb) + return handleRbac(ctx, rbacs, crb) +} + +func DeleteClusterRoleBindings(ctx context.Context, rbacs []string, kubeconfigPath string) error { + crb := &clusterRoleBindingDeleter{} + crb.New(kubeconfigPath) + return handleRbac(ctx, rbacs, crb) } func ApplyClusterRoles(ctx context.Context, rbacs []string, kubeconfigPath string) error { cr := &clusterRoleApplier{} cr.New(kubeconfigPath) - return applyRbac(ctx, rbacs, cr) + return handleRbac(ctx, rbacs, cr) +} + +func DeleteClusterRoles(ctx context.Context, rbacs []string, kubeconfigPath string) error { + cr := &clusterRoleDeleter{} + cr.New(kubeconfigPath) + return handleRbac(ctx, rbacs, cr) } + func ApplyRoleBindings(ctx context.Context, rbacs []string, kubeconfigPath string) error { rb := &roleBindingApplier{} rb.New(kubeconfigPath) - return applyRbac(ctx, rbacs, rb) + return handleRbac(ctx, rbacs, rb) } func ApplyRoles(ctx context.Context, rbacs []string, kubeconfigPath string) error { r := &roleApplier{} r.New(kubeconfigPath) - return applyRbac(ctx, rbacs, r) + return handleRbac(ctx, rbacs, r) } diff --git a/pkg/assets/scc.go b/pkg/assets/scc.go index d883c70c88..22494dfe79 100644 --- a/pkg/assets/scc.go +++ b/pkg/assets/scc.go @@ -43,7 +43,7 @@ func sccClient(kubeconfigPath string) *sccclientv1.SecurityV1Client { return sccclientv1.NewForConfigOrDie(rest.AddUserAgent(restConfig, "scc-agent")) } -func (s *sccApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (s *sccApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -58,7 +58,7 @@ func (s *sccApplier) Reader(objBytes []byte, render RenderFunc, params RenderPar s.scc = obj.(*sccv1.SecurityContextConstraints) } -func (s *sccApplier) Applier(ctx context.Context) error { +func (s *sccApplier) Handle(ctx context.Context) error { // adapted from cvo existing, err := s.Client.SecurityContextConstraints().Get(ctx, s.scc.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { @@ -79,7 +79,7 @@ func (s *sccApplier) 
Applier(ctx context.Context) error { return err } -func applySCCs(ctx context.Context, sccs []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applySCCs(ctx context.Context, sccs []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -89,8 +89,8 @@ func applySCCs(ctx context.Context, sccs []string, applier readerApplier, render if err != nil { return fmt.Errorf("error getting asset %s: %v", scc, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply scc api %s: %v", scc, err) return err } diff --git a/pkg/assets/scheduling.go b/pkg/assets/scheduling.go index 1bb69b5897..77c352db33 100644 --- a/pkg/assets/scheduling.go +++ b/pkg/assets/scheduling.go @@ -33,7 +33,7 @@ func pcClient(kubeconfigPath string) *scv1.SchedulingV1Client { return scv1.NewForConfigOrDie(rest.AddUserAgent(restConfig, "pc-agent")) } -func (s *pcApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (s *pcApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -48,7 +48,7 @@ func (s *pcApplier) Reader(objBytes []byte, render RenderFunc, params RenderPara s.pc = obj.(*sv1.PriorityClass) } -func (s *pcApplier) Applier(ctx context.Context) error { +func (s *pcApplier) Handle(ctx context.Context) error { // adapted from cvo existing, err := s.Client.PriorityClasses().Get(ctx, s.pc.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { @@ -69,7 +69,7 @@ func (s *pcApplier) Applier(ctx context.Context) error { return err } -func applyPriorityClasses(ctx context.Context, pcs []string, applier readerApplier) error { +func applyPriorityClasses(ctx context.Context, pcs []string, handler resourceHandler) error { lock.Lock() defer lock.Unlock() @@ -79,8 +79,8 @@ func applyPriorityClasses(ctx context.Context, pcs []string, applier readerAppli if err != nil { return fmt.Errorf("error getting asset %s: %v", pc, err) } - applier.Reader(objBytes, nil, nil) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, nil, nil) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply PriorityClass CR %s: %v", pc, err) return err } diff --git a/pkg/assets/storage.go b/pkg/assets/storage.go index 69041080c0..3a4d4d1733 100644 --- a/pkg/assets/storage.go +++ b/pkg/assets/storage.go @@ -45,7 +45,7 @@ type scApplier struct { sc *scv1.StorageClass } -func (s *scApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (s *scApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -59,12 +59,12 @@ func (s *scApplier) Reader(objBytes []byte, render RenderFunc, params RenderPara } s.sc = obj.(*scv1.StorageClass) } -func (s *scApplier) Applier(ctx context.Context) error { +func (s *scApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyStorageClass(ctx, s.Client, assetsEventRecorder, s.sc) return err } -func applySCs(ctx context.Context, scs []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applySCs(ctx context.Context, scs []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -74,8 +74,8 @@ func 
applySCs(ctx context.Context, scs []string, applier readerApplier, render R if err != nil { return fmt.Errorf("error getting asset %s: %v", sc, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply sc api %s: %v", sc, err) return err } @@ -95,7 +95,7 @@ type cdApplier struct { cd *scv1.CSIDriver } -func (c *cdApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (c *cdApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -110,7 +110,7 @@ func (c *cdApplier) Reader(objBytes []byte, render RenderFunc, params RenderPara c.cd = obj.(*scv1.CSIDriver) } -func (c *cdApplier) Applier(ctx context.Context) error { +func (c *cdApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyCSIDriver(ctx, c.Client, assetsEventRecorder, c.cd) return err } @@ -121,7 +121,7 @@ func ApplyCSIDrivers(ctx context.Context, drivers []string, render RenderFunc, p return applyCDs(ctx, drivers, applier, render, params) } -func applyCDs(ctx context.Context, cds []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applyCDs(ctx context.Context, cds []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -131,8 +131,8 @@ func applyCDs(ctx context.Context, cds []string, applier readerApplier, render R if err != nil { return fmt.Errorf("error getting asset %s: %v", cd, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply CSIDriver api %s: %v", cd, err) return err } @@ -145,7 +145,7 @@ type volumeSnapshotClassApplier struct { vc *unstructured.Unstructured } -func (v *volumeSnapshotClassApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (v *volumeSnapshotClassApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -162,7 +162,7 @@ func (v *volumeSnapshotClassApplier) Reader(objBytes []byte, render RenderFunc, v.vc = obj } -func (v *volumeSnapshotClassApplier) Applier(ctx context.Context) error { +func (v *volumeSnapshotClassApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyVolumeSnapshotClass(ctx, v.Client, assetsEventRecorder, v.vc) return err } @@ -173,7 +173,7 @@ func ApplyVolumeSnapshotClass(ctx context.Context, kubeconfigPath string, vcs [] return applyVolumeSnapshotClass(ctx, applier, vcs, render, params) } -func applyVolumeSnapshotClass(ctx context.Context, applier readerApplier, vcs []string, render RenderFunc, params RenderParams) error { +func applyVolumeSnapshotClass(ctx context.Context, handler resourceHandler, vcs []string, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -183,8 +183,8 @@ func applyVolumeSnapshotClass(ctx context.Context, applier readerApplier, vcs [] if err != nil { return fmt.Errorf("error getting asset %s: %v", vc, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply volumeSnapshotClass api %s: %v", vc, 
err) return err } diff --git a/pkg/components/controllers.go b/pkg/components/controllers.go index d983fe166e..7bf2daec8a 100644 --- a/pkg/components/controllers.go +++ b/pkg/components/controllers.go @@ -128,6 +128,23 @@ func startIngressController(ctx context.Context, cfg *config.Config, kubeconfigP cm = "components/openshift-router/configmap.yaml" servingKeypairSecret = "components/openshift-router/serving-certificate.yaml" ) + + if cfg.Ingress.Status == config.StatusRemoved { + if err := assets.DeleteClusterRoleBindings(ctx, clusterRoleBinding, kubeconfigPath); err != nil { + klog.Warningf("Failed to delete cluster role bindings %v: %v", clusterRoleBinding, err) + return err + } + if err := assets.DeleteClusterRoles(ctx, clusterRole, kubeconfigPath); err != nil { + klog.Warningf("Failed to delete cluster roles %v: %v", clusterRole, err) + return err + } + if err := assets.DeleteNamespaces(ctx, ns, kubeconfigPath); err != nil { + klog.Warningf("Failed to delete namespaces %v: %v", ns, err) + return err + } + return nil + } + if err := assets.ApplyNamespaces(ctx, ns, kubeconfigPath); err != nil { klog.Warningf("Failed to apply namespaces %v: %v", ns, err) return err diff --git a/pkg/config/config.go b/pkg/config/config.go index 357cafd538..664353fff3 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -117,6 +117,7 @@ func (c *Config) fillDefaults() error { }, } c.Ingress = IngressConfig{ + Status: StatusManaged, AdmissionPolicy: RouteAdmissionPolicy{ NamespaceOwnership: NamespaceOwnershipAllowed, }, @@ -190,6 +191,10 @@ func (c *Config) incorporateUserSettings(u *Config) { c.Manifests.KustomizePaths = u.Manifests.KustomizePaths } + if len(u.Ingress.Status) != 0 { + c.Ingress.Status = u.Ingress.Status + } + if len(u.Ingress.AdmissionPolicy.NamespaceOwnership) != 0 { c.Ingress.AdmissionPolicy.NamespaceOwnership = u.Ingress.AdmissionPolicy.NamespaceOwnership } @@ -295,6 +300,12 @@ func (c *Config) validate() error { } } + switch c.Ingress.Status { + case StatusManaged, StatusRemoved: + default: + return fmt.Errorf("unsupported ingress.status value %v", c.Ingress.Status) + } + switch c.Ingress.AdmissionPolicy.NamespaceOwnership { case NamespaceOwnershipAllowed, NamespaceOwnershipStrict: default: diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index fabce508ee..127852f1f5 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -244,6 +244,30 @@ func TestGetActiveConfigFromYAML(t *testing.T) { return c }(), }, + { + name: "router-managed", + config: dedent(` + ingress: + status: Managed + `), + expected: func() *Config { + c := mkDefaultConfig() + c.Ingress.Status = StatusManaged + return c + }(), + }, + { + name: "router-removed", + config: dedent(` + ingress: + status: Removed + `), + expected: func() *Config { + c := mkDefaultConfig() + c.Ingress.Status = StatusRemoved + return c + }(), + }, } for _, tt := range ttests { @@ -350,6 +374,15 @@ func TestValidate(t *testing.T) { }(), expectErr: true, }, + { + name: "router-status-invalid", + config: func() *Config { + c := mkDefaultConfig() + c.Ingress.Status = "invalid" + return c + }(), + expectErr: true, + }, } for _, tt := range ttests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/config/ingress.go b/pkg/config/ingress.go index c2bd853939..96aebefc37 100644 --- a/pkg/config/ingress.go +++ b/pkg/config/ingress.go @@ -1,13 +1,19 @@ package config const ( - NamespaceOwnershipStrict = "Strict" - NamespaceOwnershipAllowed = "InterNamespaceAllowed" + NamespaceOwnershipStrict 
NamespaceOwnershipEnum = "Strict" + NamespaceOwnershipAllowed NamespaceOwnershipEnum = "InterNamespaceAllowed" + StatusManaged IngressStatusEnum = "Managed" + StatusRemoved IngressStatusEnum = "Removed" ) type NamespaceOwnershipEnum string +type IngressStatusEnum string type IngressConfig struct { + // Default router status, can be Managed or Removed. + // +kubebuilder:default=Managed + Status IngressStatusEnum `json:"status"` AdmissionPolicy RouteAdmissionPolicy `json:"routeAdmissionPolicy"` ServingCertificate []byte `json:"-"` ServingKey []byte `json:"-"` diff --git a/scripts/auto-rebase/rebase.sh b/scripts/auto-rebase/rebase.sh index 072702e51d..e5ec1cd955 100755 --- a/scripts/auto-rebase/rebase.sh +++ b/scripts/auto-rebase/rebase.sh @@ -699,7 +699,6 @@ EOF yq -i '.spec.template.spec.containers[0].ports += {"name": "metrics", "containerPort": 1936, "protocol": "TCP"}' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.containers[0].args = ["-v=4"]' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.restartPolicy = "Always"' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml - yq -i '.spec.template.spec.terminationGracePeriodSeconds = 3600' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.dnsPolicy = "ClusterFirst"' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.nodeSelector = {"kubernetes.io/os": "linux", "node-role.kubernetes.io/worker": ""}' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.serviceAccount = "router"' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml diff --git a/test/suites/standard1/router.robot b/test/suites/standard1/router.robot index 88d83666fb..01c6668aaf 100644 --- a/test/suites/standard1/router.robot +++ b/test/suites/standard1/router.robot @@ -1,5 +1,5 @@ *** Settings *** -Documentation Router configuration tests +Documentation Router tests Resource ../../resources/common.resource Resource ../../resources/oc.resource @@ -17,14 +17,24 @@ Test Tags restart slow ${NS_OWNERSHIP_1} ${EMPTY} ${NS_OWNERSHIP_2} ${EMPTY} ${HOSTNAME} hello-microshift.cluster.local +${ROUTER_MANAGED} SEPARATOR=\n +... --- +... ingress: +... \ \ status: Managed +${ROUTER_REMOVED} SEPARATOR=\n +... --- +... ingress: +... \ \ status: Removed ${OWNERSHIP_ALLOW} SEPARATOR=\n ... --- ... ingress: +... \ \ status: Managed ... \ \ routeAdmissionPolicy: ... \ \ \ \ namespaceOwnership: InterNamespaceAllowed ${OWNERSHIP_STRICT} SEPARATOR=\n ... --- ... ingress: +... \ \ status: Managed ... \ \ routeAdmissionPolicy: ... \ \ \ \ namespaceOwnership: Strict @@ -36,7 +46,6 @@ Router Namespace Ownership Allowed [Setup] Run Keywords ... Save Default MicroShift Config ... Configure Namespace Ownership Allowed - ... Restart MicroShift ... Setup Namespaces ... Setup Hello MicroShift Pods In Multiple Namespaces ... Restart Router @@ -58,7 +67,6 @@ Router Namespace Ownership Strict [Setup] Run Keywords ... Save Default MicroShift Config ... Configure Namespace Ownership Strict - ... Restart MicroShift ... Setup Namespaces ... Setup Hello MicroShift Pods In Multiple Namespaces ... Restart Router @@ -77,6 +85,37 @@ Router Namespace Ownership Strict ... Restore Default MicroShift Config ... Restart MicroShift +Router Enabled + [Documentation] Check default configuration, router enabled and standard ports and expose. + [Setup] Run Keywords + ... 
Save Default MicroShift Config + ... Enable Router + ... Create Hello MicroShift Pod + ... Expose Hello MicroShift Service Via Route + ... Restart Router + + Wait Until Keyword Succeeds 10x 6s + ... Access Hello Microshift ${HTTP_PORT} + + [Teardown] Run Keywords + ... Delete Hello MicroShift Route + ... Delete Hello MicroShift Pod And Service + ... Wait For Service Deletion With Timeout + ... Restore Default MicroShift Config + ... Restart MicroShift + +Router Disabled + [Documentation] Disable the router and check that the namespace no longer exists. + [Setup] Run Keywords + ... Save Default MicroShift Config + ... Disable Router + + Run With Kubeconfig oc wait --for=delete namespace/openshift-ingress --timeout=60s + + [Teardown] Run Keywords + ... Restore Default MicroShift Config + ... Restart MicroShift + *** Keywords *** Configure Namespace Ownership Allowed @@ -87,11 +126,51 @@ Configure Namespace Ownership Strict [Documentation] Configure MicroShift to use Strict namespace ownership Setup With Custom Config ${OWNERSHIP_STRICT} +Restart Router + [Documentation] Restart the router and wait for readiness again. The router is sensitive to apiserver + ... downtime and might need a restart (after the apiserver is ready) to resync all the routes. + Run With Kubeconfig oc rollout restart deployment router-default -n openshift-ingress + Named Deployment Should Be Available router-default openshift-ingress 5m + +Expose Hello MicroShift Service Via Route + [Documentation] Expose the "hello microshift" application through the Route + Oc Expose pod hello-microshift -n ${NAMESPACE} + Oc Expose svc hello-microshift --hostname hello-microshift.cluster.local -n ${NAMESPACE} + +Delete Hello MicroShift Route + [Documentation] Delete route for cleanup. + Oc Delete route/hello-microshift -n ${NAMESPACE} + +Wait For Service Deletion With Timeout + [Documentation] Polls for service and endpoint by "app=hello-microshift" label. Fails if timeout + ... expires. This check is unique to this test suite because each test here reuses the same namespace. Since + ... the tests reuse the service name, a small race window exists between the teardown of one test and the setup + ... of the next. This produces flaky failures when the service or endpoint names collide. + Wait Until Keyword Succeeds 30s 1s + ... Network APIs With Test Label Are Gone + +Network APIs With Test Label Are Gone + [Documentation] Check for service and endpoint by "app=hello-microshift" label. Succeeds if the response matches + ... "No resources found in namespace." Fails if not. + ${match_string}= Catenate No resources found in ${NAMESPACE} namespace.
+ ${match_string}= Remove String ${match_string} " + ${response}= Run With Kubeconfig oc get svc,ep -l app\=hello-microshift -n ${NAMESPACE} + Should Be Equal As Strings ${match_string} ${response} strip_spaces=True + +Disable Router + [Documentation] Disable the router + Setup With Custom Config ${ROUTER_REMOVED} + +Enable Router + [Documentation] Enable the router + Setup With Custom Config ${ROUTER_MANAGED} + Setup With Custom Config [Documentation] Install a custom config and restart MicroShift [Arguments] ${config_content} ${merged}= Extend MicroShift Config ${config_content} Upload MicroShift Config ${merged} + Restart MicroShift Setup Namespaces [Documentation] Configure the required namespaces for namespace ownership tests @@ -113,9 +192,3 @@ Setup Hello MicroShift Pods In Multiple Namespaces Expose Hello MicroShift ${NS_OWNERSHIP_2} Oc Expose svc hello-microshift --hostname ${HOSTNAME} --path /${NS_OWNERSHIP_1} -n ${NS_OWNERSHIP_1} Oc Expose svc hello-microshift --hostname ${HOSTNAME} --path /${NS_OWNERSHIP_2} -n ${NS_OWNERSHIP_2} - -Restart Router - [Documentation] Restart the router and wait for readiness again. The router is sensitive to apiserver - ... downtime and might need a restart (after the apiserver is ready) to resync all the routes. - Run With Kubeconfig oc rollout restart deployment router-default -n openshift-ingress - Named Deployment Should Be Available router-default openshift-ingress 5m
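For reference, a minimal sketch of how the new ingress.status field fits together end to end, using simplified, hypothetical types and function names (validateIngressStatus is illustrative; only IngressStatusEnum, StatusManaged/StatusRemoved, the validate() switch, and the Removed branch in startIngressController come from this patch): the value defaults to Managed, validation rejects anything other than Managed or Removed, and Removed makes the ingress controller delete the router assets instead of applying them.

package main

import (
	"context"
	"fmt"
)

type IngressStatusEnum string

const (
	StatusManaged IngressStatusEnum = "Managed"
	StatusRemoved IngressStatusEnum = "Removed"
)

// IngressConfig is reduced to the one field relevant here.
type IngressConfig struct {
	Status IngressStatusEnum
}

// validateIngressStatus mirrors the switch added to Config.validate().
func validateIngressStatus(s IngressStatusEnum) error {
	switch s {
	case StatusManaged, StatusRemoved:
		return nil
	default:
		return fmt.Errorf("unsupported ingress.status value %v", s)
	}
}

// startIngressController shows the gating logic from pkg/components/controllers.go:
// Removed tears the router down (RBAC and the openshift-ingress namespace)
// instead of applying the router assets.
func startIngressController(ctx context.Context, cfg IngressConfig) error {
	if err := validateIngressStatus(cfg.Status); err != nil {
		return err
	}
	if cfg.Status == StatusRemoved {
		fmt.Println("removing router: cluster role bindings, cluster roles, openshift-ingress namespace")
		return nil
	}
	fmt.Println("applying router: namespace, RBAC, service, deployment")
	return nil
}

func main() {
	ctx := context.Background()
	_ = startIngressController(ctx, IngressConfig{Status: StatusManaged})
	_ = startIngressController(ctx, IngressConfig{Status: StatusRemoved})
}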