From bff7bd41ec0eacde4f936689d5fe83e22659dbe5 Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Fri, 1 Mar 2024 11:43:30 +0100 Subject: [PATCH 01/15] USHIFT-2444: Add ingress status config parameter --- pkg/config/config.go | 11 +++++++++++ pkg/config/ingress.go | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/pkg/config/config.go b/pkg/config/config.go index 357cafd538..4780ff482f 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -117,6 +117,7 @@ func (c *Config) fillDefaults() error { }, } c.Ingress = IngressConfig{ + Status: StatusEnabled, AdmissionPolicy: RouteAdmissionPolicy{ NamespaceOwnership: NamespaceOwnershipAllowed, }, @@ -190,8 +191,12 @@ func (c *Config) incorporateUserSettings(u *Config) { c.Manifests.KustomizePaths = u.Manifests.KustomizePaths } if len(u.Ingress.AdmissionPolicy.NamespaceOwnership) != 0 { c.Ingress.AdmissionPolicy.NamespaceOwnership = u.Ingress.AdmissionPolicy.NamespaceOwnership } + + if len(u.Ingress.Status) != 0 { + c.Ingress.Status = u.Ingress.Status + } } @@ -295,10 +301,16 @@ func (c *Config) validate() error { } } switch c.Ingress.AdmissionPolicy.NamespaceOwnership { case NamespaceOwnershipAllowed, NamespaceOwnershipStrict: default: return fmt.Errorf("unsupported namespaceOwnership value %v", c.Ingress.AdmissionPolicy.NamespaceOwnership) } + + switch c.Ingress.Status { + case StatusEnabled, StatusDisabled: + default: + return fmt.Errorf("unsupported ingress.status value %v", c.Ingress.Status) + } return nil diff --git a/pkg/config/ingress.go b/pkg/config/ingress.go index c2bd853939..92a4175c35 100644 --- a/pkg/config/ingress.go +++ b/pkg/config/ingress.go @@ -3,11 +3,15 @@ package config const ( NamespaceOwnershipStrict = "Strict" NamespaceOwnershipAllowed = "InterNamespaceAllowed" + StatusEnabled = "Enabled" + 
StatusDisabled = "Disabled" ) type NamespaceOwnershipEnum string +type IngressStatusEnum string type IngressConfig struct { + Status IngressStatusEnum `json:"status"` AdmissionPolicy RouteAdmissionPolicy `json:"routeAdmissionPolicy"` ServingCertificate []byte `json:"-"` ServingKey []byte `json:"-"` From 4ecca5750d0987fd6907473a3031ba7867a35951 Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Fri, 1 Mar 2024 14:38:22 +0100 Subject: [PATCH 02/15] USHIFT-2444: Rename resource handler in assets --- pkg/assets/{applier.go => handler.go} | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename pkg/assets/{applier.go => handler.go} (75%) diff --git a/pkg/assets/applier.go b/pkg/assets/handler.go similarity index 75% rename from pkg/assets/applier.go rename to pkg/assets/handler.go index b5f40d8c71..b1bdff61c1 100644 --- a/pkg/assets/applier.go +++ b/pkg/assets/handler.go @@ -17,7 +17,7 @@ type RenderParams map[string]interface{} type RenderFunc func([]byte, RenderParams) ([]byte, error) -type readerApplier interface { - Reader([]byte, RenderFunc, RenderParams) - Applier(ctx context.Context) error +type resourceHandler interface { + Read([]byte, RenderFunc, RenderParams) + Handle(ctx context.Context) error } From 872695d547cf87e68f7fbf8e6e208bc92b0d1e3e Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Fri, 1 Mar 2024 15:07:01 +0100 Subject: [PATCH 03/15] USHIFT-2444: Rename resource handlers in assets pkg --- pkg/assets/admission-registration.go | 10 +++++----- pkg/assets/apps.go | 14 ++++++------- pkg/assets/core.go | 30 ++++++++++++++-------------- pkg/assets/rbac.go | 22 ++++++++++---------- pkg/assets/scc.go | 10 +++++----- pkg/assets/scheduling.go | 10 +++++----- pkg/assets/storage.go | 30 ++++++++++++++-------------- 7 files changed, 63 insertions(+), 63 deletions(-) diff --git a/pkg/assets/admission-registration.go b/pkg/assets/admission-registration.go index 68657bf687..a1fe262f7c 100644 --- a/pkg/assets/admission-registration.go 
+++ b/pkg/assets/admission-registration.go @@ -22,7 +22,7 @@ type validationWebhookCfg struct { codec serializer.CodecFactory } -func (v *validationWebhookCfg) Reader(objBytes []byte, renderFunc RenderFunc, params RenderParams) { +func (v *validationWebhookCfg) Read(objBytes []byte, renderFunc RenderFunc, params RenderParams) { var err error if renderFunc != nil { objBytes, err = renderFunc(objBytes, RenderParams{}) @@ -37,7 +37,7 @@ func (v *validationWebhookCfg) Reader(objBytes []byte, renderFunc RenderFunc, pa v.validationWebhookConfig = obj.(*arV1.ValidatingWebhookConfiguration) } -func (v *validationWebhookCfg) Applier(ctx context.Context) error { +func (v *validationWebhookCfg) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyValidatingWebhookConfigurationImproved(ctx, v.Client, assetsEventRecorder, v.validationWebhookConfig, resourceapply.NewResourceCache()) return err } @@ -50,7 +50,7 @@ func admissionRegistrationClient(kubeconfigPath string) *arClientV1.Admissionreg return arClientV1.NewForConfigOrDie(rest.AddUserAgent(restConfig, "admission-registration")) } -func applyAdmissionRegistration(ctx context.Context, admissionRegistrations []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applyAdmissionRegistration(ctx context.Context, admissionRegistrations []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -60,8 +60,8 @@ func applyAdmissionRegistration(ctx context.Context, admissionRegistrations []st if err != nil { return fmt.Errorf("error getting embedded asset %s: %w", ar, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("failed to apply admissionRegistration object: %s, %v", ar, err) return err } diff --git a/pkg/assets/apps.go b/pkg/assets/apps.go index 37826a8b80..df01851da6 100644 --- 
a/pkg/assets/apps.go +++ b/pkg/assets/apps.go @@ -42,7 +42,7 @@ type dpApplier struct { dp *appsv1.Deployment } -func (d *dpApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (d *dpApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -57,7 +57,7 @@ func (d *dpApplier) Reader(objBytes []byte, render RenderFunc, params RenderPara d.dp = obj.(*appsv1.Deployment) } -func (d *dpApplier) Applier(ctx context.Context) error { +func (d *dpApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyDeployment(ctx, d.Client, assetsEventRecorder, d.dp, 0) return err } @@ -67,7 +67,7 @@ type dsApplier struct { ds *appsv1.DaemonSet } -func (d *dsApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (d *dsApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -81,12 +81,12 @@ func (d *dsApplier) Reader(objBytes []byte, render RenderFunc, params RenderPara } d.ds = obj.(*appsv1.DaemonSet) } -func (d *dsApplier) Applier(ctx context.Context) error { +func (d *dsApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyDaemonSet(ctx, d.Client, assetsEventRecorder, d.ds, 0) return err } -func applyApps(ctx context.Context, apps []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applyApps(ctx context.Context, apps []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -96,8 +96,8 @@ func applyApps(ctx context.Context, apps []string, applier readerApplier, render if err != nil { return fmt.Errorf("error getting asset %s: %v", app, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil 
{ klog.Warningf("Failed to apply apps api %s: %v", app, err) return err } diff --git a/pkg/assets/core.go b/pkg/assets/core.go index 373bee9a24..2e1e76f402 100644 --- a/pkg/assets/core.go +++ b/pkg/assets/core.go @@ -42,7 +42,7 @@ func coreClient(kubeconfigPath string) *coreclientv1.CoreV1Client { return coreclientv1.NewForConfigOrDie(rest.AddUserAgent(restConfig, "core-agent")) } -func (ns *nsApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (ns *nsApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -57,7 +57,7 @@ func (ns *nsApplier) Reader(objBytes []byte, render RenderFunc, params RenderPar ns.ns = obj.(*corev1.Namespace) } -func (ns *nsApplier) Applier(ctx context.Context) error { +func (ns *nsApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyNamespace(ctx, ns.Client, assetsEventRecorder, ns.ns) return err } @@ -67,7 +67,7 @@ type secretApplier struct { secret *corev1.Secret } -func (secret *secretApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (secret *secretApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -82,7 +82,7 @@ func (secret *secretApplier) Reader(objBytes []byte, render RenderFunc, params R secret.secret = obj.(*corev1.Secret) } -func (secret *secretApplier) Applier(ctx context.Context) error { +func (secret *secretApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplySecret(ctx, secret.Client, assetsEventRecorder, secret.secret) return err } @@ -92,7 +92,7 @@ type svcApplier struct { svc *corev1.Service } -func (svc *svcApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (svc *svcApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = 
render(objBytes, params) @@ -107,7 +107,7 @@ func (svc *svcApplier) Reader(objBytes []byte, render RenderFunc, params RenderP svc.svc = obj.(*corev1.Service) } -func (svc *svcApplier) Applier(ctx context.Context) error { +func (svc *svcApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyService(ctx, svc.Client, assetsEventRecorder, svc.svc) return err } @@ -117,7 +117,7 @@ type saApplier struct { sa *corev1.ServiceAccount } -func (sa *saApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (sa *saApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -132,7 +132,7 @@ func (sa *saApplier) Reader(objBytes []byte, render RenderFunc, params RenderPar sa.sa = obj.(*corev1.ServiceAccount) } -func (sa *saApplier) Applier(ctx context.Context) error { +func (sa *saApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyServiceAccount(ctx, sa.Client, assetsEventRecorder, sa.sa) return err } @@ -142,7 +142,7 @@ type cmApplier struct { cm *corev1.ConfigMap } -func (cm *cmApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (cm *cmApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -157,12 +157,12 @@ func (cm *cmApplier) Reader(objBytes []byte, render RenderFunc, params RenderPar cm.cm = obj.(*corev1.ConfigMap) } -func (cm *cmApplier) Applier(ctx context.Context) error { +func (cm *cmApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyConfigMap(ctx, cm.Client, assetsEventRecorder, cm.cm) return err } -func applyCore(ctx context.Context, cores []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applyCore(ctx context.Context, cores []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer 
lock.Unlock() @@ -172,8 +172,8 @@ func applyCore(ctx context.Context, cores []string, applier readerApplier, rende if err != nil { return fmt.Errorf("error getting asset %s: %v", core, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply corev1 api %s: %v", core, err) return err } @@ -213,7 +213,7 @@ func ApplyConfigMapWithData(ctx context.Context, cmPath string, data map[string] if err != nil { return err } - cm.Reader(cmBytes, nil, nil) + cm.Read(cmBytes, nil, nil) cm.cm.Data = data _, _, err = resourceapply.ApplyConfigMap(ctx, cm.Client, assetsEventRecorder, cm.cm) return err @@ -226,7 +226,7 @@ func ApplySecretWithData(ctx context.Context, secretPath string, data map[string if err != nil { return err } - secret.Reader(secretBytes, nil, nil) + secret.Read(secretBytes, nil, nil) secret.secret.Data = data _, _, err = resourceapply.ApplySecret(ctx, secret.Client, assetsEventRecorder, secret.secret) return err diff --git a/pkg/assets/rbac.go b/pkg/assets/rbac.go index 50f6e5d1d1..418c51ddc0 100644 --- a/pkg/assets/rbac.go +++ b/pkg/assets/rbac.go @@ -42,7 +42,7 @@ func (crb *clusterRoleBindingApplier) New(kubeconfigPath string) { crb.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) } -func (crb *clusterRoleBindingApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderParams) { +func (crb *clusterRoleBindingApplier) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) if err != nil { panic(err) @@ -50,7 +50,7 @@ func (crb *clusterRoleBindingApplier) Reader(objBytes []byte, _ RenderFunc, _ Re crb.crb = obj.(*rbacv1.ClusterRoleBinding) } -func (crb *clusterRoleBindingApplier) Applier(ctx context.Context) error { +func (crb *clusterRoleBindingApplier) Handle(ctx context.Context) error 
{ _, _, err := resourceapply.ApplyClusterRoleBinding(ctx, crb.client.RbacV1(), assetsEventRecorder, crb.crb) return err } @@ -69,7 +69,7 @@ func (cr *clusterRoleApplier) New(kubeconfigPath string) { cr.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) } -func (cr *clusterRoleApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderParams) { +func (cr *clusterRoleApplier) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) if err != nil { panic(err) @@ -77,7 +77,7 @@ func (cr *clusterRoleApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderPara cr.cr = obj.(*rbacv1.ClusterRole) } -func (cr *clusterRoleApplier) Applier(ctx context.Context) error { +func (cr *clusterRoleApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyClusterRole(ctx, cr.client.RbacV1(), assetsEventRecorder, cr.cr) return err } @@ -96,7 +96,7 @@ func (rb *roleBindingApplier) New(kubeconfigPath string) { rb.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) } -func (rb *roleBindingApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderParams) { +func (rb *roleBindingApplier) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) if err != nil { panic(err) @@ -104,7 +104,7 @@ func (rb *roleBindingApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderPara rb.rb = obj.(*rbacv1.RoleBinding) } -func (rb *roleBindingApplier) Applier(ctx context.Context) error { +func (rb *roleBindingApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyRoleBinding(ctx, rb.client.RbacV1(), assetsEventRecorder, rb.rb) return err } @@ -123,7 +123,7 @@ func (r *roleApplier) New(kubeconfigPath string) { r.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) } -func (r *roleApplier) Reader(objBytes 
[]byte, _ RenderFunc, _ RenderParams) { +func (r *roleApplier) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) if err != nil { panic(err) @@ -131,12 +131,12 @@ func (r *roleApplier) Reader(objBytes []byte, _ RenderFunc, _ RenderParams) { r.r = obj.(*rbacv1.Role) } -func (r *roleApplier) Applier(ctx context.Context) error { +func (r *roleApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyRole(ctx, r.client.RbacV1(), assetsEventRecorder, r.r) return err } -func applyRbac(ctx context.Context, rbacs []string, applier readerApplier) error { +func applyRbac(ctx context.Context, rbacs []string, handler resourceHandler) error { lock.Lock() defer lock.Unlock() @@ -146,8 +146,8 @@ func applyRbac(ctx context.Context, rbacs []string, applier readerApplier) error if err != nil { return fmt.Errorf("error getting asset %s: %v", rbac, err) } - applier.Reader(objBytes, nil, nil) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, nil, nil) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply rbac %s: %v", rbac, err) return err } diff --git a/pkg/assets/scc.go b/pkg/assets/scc.go index d883c70c88..22494dfe79 100644 --- a/pkg/assets/scc.go +++ b/pkg/assets/scc.go @@ -43,7 +43,7 @@ func sccClient(kubeconfigPath string) *sccclientv1.SecurityV1Client { return sccclientv1.NewForConfigOrDie(rest.AddUserAgent(restConfig, "scc-agent")) } -func (s *sccApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (s *sccApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -58,7 +58,7 @@ func (s *sccApplier) Reader(objBytes []byte, render RenderFunc, params RenderPar s.scc = obj.(*sccv1.SecurityContextConstraints) } -func (s *sccApplier) Applier(ctx context.Context) error { +func (s *sccApplier) Handle(ctx 
context.Context) error { // adapted from cvo existing, err := s.Client.SecurityContextConstraints().Get(ctx, s.scc.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { @@ -79,7 +79,7 @@ func (s *sccApplier) Applier(ctx context.Context) error { return err } -func applySCCs(ctx context.Context, sccs []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applySCCs(ctx context.Context, sccs []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -89,8 +89,8 @@ func applySCCs(ctx context.Context, sccs []string, applier readerApplier, render if err != nil { return fmt.Errorf("error getting asset %s: %v", scc, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply scc api %s: %v", scc, err) return err } diff --git a/pkg/assets/scheduling.go b/pkg/assets/scheduling.go index 1bb69b5897..77c352db33 100644 --- a/pkg/assets/scheduling.go +++ b/pkg/assets/scheduling.go @@ -33,7 +33,7 @@ func pcClient(kubeconfigPath string) *scv1.SchedulingV1Client { return scv1.NewForConfigOrDie(rest.AddUserAgent(restConfig, "pc-agent")) } -func (s *pcApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (s *pcApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -48,7 +48,7 @@ func (s *pcApplier) Reader(objBytes []byte, render RenderFunc, params RenderPara s.pc = obj.(*sv1.PriorityClass) } -func (s *pcApplier) Applier(ctx context.Context) error { +func (s *pcApplier) Handle(ctx context.Context) error { // adapted from cvo existing, err := s.Client.PriorityClasses().Get(ctx, s.pc.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { @@ -69,7 +69,7 @@ func (s *pcApplier) Applier(ctx context.Context) error { return 
err } -func applyPriorityClasses(ctx context.Context, pcs []string, applier readerApplier) error { +func applyPriorityClasses(ctx context.Context, pcs []string, handler resourceHandler) error { lock.Lock() defer lock.Unlock() @@ -79,8 +79,8 @@ func applyPriorityClasses(ctx context.Context, pcs []string, applier readerAppli if err != nil { return fmt.Errorf("error getting asset %s: %v", pc, err) } - applier.Reader(objBytes, nil, nil) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, nil, nil) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply PriorityClass CR %s: %v", pc, err) return err } diff --git a/pkg/assets/storage.go b/pkg/assets/storage.go index 69041080c0..3a4d4d1733 100644 --- a/pkg/assets/storage.go +++ b/pkg/assets/storage.go @@ -45,7 +45,7 @@ type scApplier struct { sc *scv1.StorageClass } -func (s *scApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (s *scApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -59,12 +59,12 @@ func (s *scApplier) Reader(objBytes []byte, render RenderFunc, params RenderPara } s.sc = obj.(*scv1.StorageClass) } -func (s *scApplier) Applier(ctx context.Context) error { +func (s *scApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyStorageClass(ctx, s.Client, assetsEventRecorder, s.sc) return err } -func applySCs(ctx context.Context, scs []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applySCs(ctx context.Context, scs []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -74,8 +74,8 @@ func applySCs(ctx context.Context, scs []string, applier readerApplier, render R if err != nil { return fmt.Errorf("error getting asset %s: %v", sc, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { 
+ handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply sc api %s: %v", sc, err) return err } @@ -95,7 +95,7 @@ type cdApplier struct { cd *scv1.CSIDriver } -func (c *cdApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (c *cdApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -110,7 +110,7 @@ func (c *cdApplier) Reader(objBytes []byte, render RenderFunc, params RenderPara c.cd = obj.(*scv1.CSIDriver) } -func (c *cdApplier) Applier(ctx context.Context) error { +func (c *cdApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyCSIDriver(ctx, c.Client, assetsEventRecorder, c.cd) return err } @@ -121,7 +121,7 @@ func ApplyCSIDrivers(ctx context.Context, drivers []string, render RenderFunc, p return applyCDs(ctx, drivers, applier, render, params) } -func applyCDs(ctx context.Context, cds []string, applier readerApplier, render RenderFunc, params RenderParams) error { +func applyCDs(ctx context.Context, cds []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -131,8 +131,8 @@ func applyCDs(ctx context.Context, cds []string, applier readerApplier, render R if err != nil { return fmt.Errorf("error getting asset %s: %v", cd, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply CSIDriver api %s: %v", cd, err) return err } @@ -145,7 +145,7 @@ type volumeSnapshotClassApplier struct { vc *unstructured.Unstructured } -func (v *volumeSnapshotClassApplier) Reader(objBytes []byte, render RenderFunc, params RenderParams) { +func (v *volumeSnapshotClassApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { var err error if render != 
nil { objBytes, err = render(objBytes, params) @@ -162,7 +162,7 @@ func (v *volumeSnapshotClassApplier) Reader(objBytes []byte, render RenderFunc, v.vc = obj } -func (v *volumeSnapshotClassApplier) Applier(ctx context.Context) error { +func (v *volumeSnapshotClassApplier) Handle(ctx context.Context) error { _, _, err := resourceapply.ApplyVolumeSnapshotClass(ctx, v.Client, assetsEventRecorder, v.vc) return err } @@ -173,7 +173,7 @@ func ApplyVolumeSnapshotClass(ctx context.Context, kubeconfigPath string, vcs [] return applyVolumeSnapshotClass(ctx, applier, vcs, render, params) } -func applyVolumeSnapshotClass(ctx context.Context, applier readerApplier, vcs []string, render RenderFunc, params RenderParams) error { +func applyVolumeSnapshotClass(ctx context.Context, handler resourceHandler, vcs []string, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -183,8 +183,8 @@ func applyVolumeSnapshotClass(ctx context.Context, applier readerApplier, vcs [] if err != nil { return fmt.Errorf("error getting asset %s: %v", vc, err) } - applier.Reader(objBytes, render, params) - if err := applier.Applier(ctx); err != nil { + handler.Read(objBytes, render, params) + if err := handler.Handle(ctx); err != nil { klog.Warningf("Failed to apply volumeSnapshotClass api %s: %v", vc, err) return err } From 021004accf0e4252bf7eb0404f3b0414f53f6870 Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Fri, 1 Mar 2024 15:30:10 +0100 Subject: [PATCH 04/15] USHIFT-2444: simplify assets core handler --- pkg/assets/core.go | 80 ++++++++++++---------------------------------- 1 file changed, 20 insertions(+), 60 deletions(-) diff --git a/pkg/assets/core.go b/pkg/assets/core.go index 2e1e76f402..3bb5f2c2c9 100644 --- a/pkg/assets/core.go +++ b/pkg/assets/core.go @@ -28,11 +28,6 @@ func init() { } } -type nsApplier struct { - Client *coreclientv1.CoreV1Client - ns *corev1.Namespace -} - func coreClient(kubeconfigPath string) *coreclientv1.CoreV1Client { 
restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) if err != nil { @@ -42,7 +37,7 @@ func coreClient(kubeconfigPath string) *coreclientv1.CoreV1Client { return coreclientv1.NewForConfigOrDie(rest.AddUserAgent(restConfig, "core-agent")) } -func (ns *nsApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { +func readCore(objBytes []byte, render RenderFunc, params RenderParams) runtime.Object { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -54,7 +49,16 @@ func (ns *nsApplier) Read(objBytes []byte, render RenderFunc, params RenderParam if err != nil { panic(err) } - ns.ns = obj.(*corev1.Namespace) + return obj +} + +type nsApplier struct { + Client *coreclientv1.CoreV1Client + ns *corev1.Namespace +} + +func (ns *nsApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { + ns.ns = readCore(objBytes, render, params).(*corev1.Namespace) } func (ns *nsApplier) Handle(ctx context.Context) error { @@ -68,18 +72,7 @@ type secretApplier struct { } func (secret *secretApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { - var err error - if render != nil { - objBytes, err = render(objBytes, params) - if err != nil { - panic(err) - } - } - obj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - secret.secret = obj.(*corev1.Secret) + secret.secret = readCore(objBytes, render, params).(*corev1.Secret) } func (secret *secretApplier) Handle(ctx context.Context) error { @@ -93,18 +86,7 @@ type svcApplier struct { } func (svc *svcApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { - var err error - if render != nil { - objBytes, err = render(objBytes, params) - if err != nil { - panic(err) - } - } - obj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - svc.svc = obj.(*corev1.Service) + svc.svc = readCore(objBytes, 
render, params).(*corev1.Service) } func (svc *svcApplier) Handle(ctx context.Context) error { @@ -118,18 +100,7 @@ type saApplier struct { } func (sa *saApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { - var err error - if render != nil { - objBytes, err = render(objBytes, params) - if err != nil { - panic(err) - } - } - obj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - sa.sa = obj.(*corev1.ServiceAccount) + sa.sa = readCore(objBytes, render, params).(*corev1.ServiceAccount) } func (sa *saApplier) Handle(ctx context.Context) error { @@ -143,18 +114,7 @@ type cmApplier struct { } func (cm *cmApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { - var err error - if render != nil { - objBytes, err = render(objBytes, params) - if err != nil { - panic(err) - } - } - obj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) - if err != nil { - panic(err) - } - cm.cm = obj.(*corev1.ConfigMap) + cm.cm = readCore(objBytes, render, params).(*corev1.ConfigMap) } func (cm *cmApplier) Handle(ctx context.Context) error { @@ -162,7 +122,7 @@ func (cm *cmApplier) Handle(ctx context.Context) error { return err } -func applyCore(ctx context.Context, cores []string, handler resourceHandler, render RenderFunc, params RenderParams) error { +func handleCore(ctx context.Context, cores []string, handler resourceHandler, render RenderFunc, params RenderParams) error { lock.Lock() defer lock.Unlock() @@ -185,25 +145,25 @@ func applyCore(ctx context.Context, cores []string, handler resourceHandler, ren func ApplyNamespaces(ctx context.Context, cores []string, kubeconfigPath string) error { ns := &nsApplier{} ns.Client = coreClient(kubeconfigPath) - return applyCore(ctx, cores, ns, nil, nil) + return handleCore(ctx, cores, ns, nil, nil) } func ApplyServices(ctx context.Context, cores []string, render RenderFunc, params RenderParams, 
kubeconfigPath string) error { svc := &svcApplier{} svc.Client = coreClient(kubeconfigPath) - return applyCore(ctx, cores, svc, render, params) + return handleCore(ctx, cores, svc, render, params) } func ApplyServiceAccounts(ctx context.Context, cores []string, kubeconfigPath string) error { sa := &saApplier{} sa.Client = coreClient(kubeconfigPath) - return applyCore(ctx, cores, sa, nil, nil) + return handleCore(ctx, cores, sa, nil, nil) } func ApplyConfigMaps(ctx context.Context, cores []string, render RenderFunc, params RenderParams, kubeconfigPath string) error { cm := &cmApplier{} cm.Client = coreClient(kubeconfigPath) - return applyCore(ctx, cores, cm, render, params) + return handleCore(ctx, cores, cm, render, params) } func ApplyConfigMapWithData(ctx context.Context, cmPath string, data map[string]string, kubeconfigPath string) error { From 2f85820265e3fbdde51c391453064bb6617b8da0 Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Fri, 1 Mar 2024 16:58:40 +0100 Subject: [PATCH 05/15] USHIFT-2444: Delete ingress namespace on disable --- pkg/assets/core.go | 20 ++++++++++++++++++++ pkg/components/controllers.go | 9 +++++++++ 2 files changed, 29 insertions(+) diff --git a/pkg/assets/core.go b/pkg/assets/core.go index 3bb5f2c2c9..110cc962e6 100644 --- a/pkg/assets/core.go +++ b/pkg/assets/core.go @@ -66,6 +66,20 @@ func (ns *nsApplier) Handle(ctx context.Context) error { return err } +type nsDeleter struct { + Client *coreclientv1.CoreV1Client + ns *corev1.Namespace +} + +func (ns *nsDeleter) Read(objBytes []byte, render RenderFunc, params RenderParams) { + ns.ns = readCore(objBytes, render, params).(*corev1.Namespace) +} + +func (ns *nsDeleter) Handle(ctx context.Context) error { + _, _, err := resourceapply.DeleteNamespace(ctx, ns.Client, assetsEventRecorder, ns.ns) + return err +} + type secretApplier struct { Client *coreclientv1.CoreV1Client secret *corev1.Secret @@ -148,6 +162,12 @@ func ApplyNamespaces(ctx context.Context, cores []string, 
kubeconfigPath string) return handleCore(ctx, cores, ns, nil, nil) } +func DeleteNamespaces(ctx context.Context, cores []string, kubeconfigPath string) error { + ns := &nsDeleter{} + ns.Client = coreClient(kubeconfigPath) + return handleCore(ctx, cores, ns, nil, nil) +} + func ApplyServices(ctx context.Context, cores []string, render RenderFunc, params RenderParams, kubeconfigPath string) error { svc := &svcApplier{} svc.Client = coreClient(kubeconfigPath) diff --git a/pkg/components/controllers.go b/pkg/components/controllers.go index d983fe166e..9aab63f881 100644 --- a/pkg/components/controllers.go +++ b/pkg/components/controllers.go @@ -128,6 +128,15 @@ func startIngressController(ctx context.Context, cfg *config.Config, kubeconfigP cm = "components/openshift-router/configmap.yaml" servingKeypairSecret = "components/openshift-router/serving-certificate.yaml" ) + + if cfg.Ingress.Status == config.StatusDisabled { + if err := assets.DeleteNamespaces(ctx, ns, kubeconfigPath); err != nil { + klog.Warningf("Failed to delete namespaces %v: %v", ns, err) + return err + } + return nil + } + if err := assets.ApplyNamespaces(ctx, ns, kubeconfigPath); err != nil { klog.Warningf("Failed to apply namespaces %v: %v", ns, err) return err From d3de72cb24ca79d27622d1cf1a827bf8cd7bbcba Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Fri, 1 Mar 2024 17:29:27 +0100 Subject: [PATCH 06/15] USHIFT-2444: Add deletion of cluster wide resources on disable --- pkg/assets/rbac.go | 81 ++++++++++++++++++++++++++++++++--- pkg/components/controllers.go | 8 ++++ 2 files changed, 82 insertions(+), 7 deletions(-) diff --git a/pkg/assets/rbac.go b/pkg/assets/rbac.go index 418c51ddc0..ae83fbe77e 100644 --- a/pkg/assets/rbac.go +++ b/pkg/assets/rbac.go @@ -55,6 +55,33 @@ func (crb *clusterRoleBindingApplier) Handle(ctx context.Context) error { return err } +type clusterRoleBindingDeleter struct { + client *kubernetes.Clientset + crb *rbacv1.ClusterRoleBinding +} + +func (crb 
*clusterRoleBindingDeleter) New(kubeconfigPath string) { + restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + panic(err) + } + + crb.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) +} + +func (crb *clusterRoleBindingDeleter) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { + obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + crb.crb = obj.(*rbacv1.ClusterRoleBinding) +} + +func (crb *clusterRoleBindingDeleter) Handle(ctx context.Context) error { + _, _, err := resourceapply.DeleteClusterRoleBinding(ctx, crb.client.RbacV1(), assetsEventRecorder, crb.crb) + return err +} + type clusterRoleApplier struct { client *kubernetes.Clientset cr *rbacv1.ClusterRole @@ -109,6 +136,33 @@ func (rb *roleBindingApplier) Handle(ctx context.Context) error { return err } +type clusterRoleDeleter struct { + client *kubernetes.Clientset + cr *rbacv1.ClusterRole +} + +func (cr *clusterRoleDeleter) New(kubeconfigPath string) { + restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + panic(err) + } + + cr.client = kubernetes.NewForConfigOrDie(rest.AddUserAgent(restConfig, "rbac-agent")) +} + +func (cr *clusterRoleDeleter) Read(objBytes []byte, _ RenderFunc, _ RenderParams) { + obj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + cr.cr = obj.(*rbacv1.ClusterRole) +} + +func (cr *clusterRoleDeleter) Handle(ctx context.Context) error { + _, _, err := resourceapply.DeleteClusterRole(ctx, cr.client.RbacV1(), assetsEventRecorder, cr.cr) + return err +} + type roleApplier struct { client *kubernetes.Clientset r *rbacv1.Role @@ -136,19 +190,19 @@ func (r *roleApplier) Handle(ctx context.Context) error { return err } -func applyRbac(ctx context.Context, rbacs []string, handler resourceHandler) error { +func handleRbac(ctx 
context.Context, rbacs []string, handler resourceHandler) error { lock.Lock() defer lock.Unlock() for _, rbac := range rbacs { - klog.Infof("Applying rbac %s", rbac) + klog.Infof("Handling rbac %s", rbac) objBytes, err := embedded.Asset(rbac) if err != nil { return fmt.Errorf("error getting asset %s: %v", rbac, err) } handler.Read(objBytes, nil, nil) if err := handler.Handle(ctx); err != nil { - klog.Warningf("Failed to apply rbac %s: %v", rbac, err) + klog.Warningf("Failed to handle rbac %s: %v", rbac, err) return err } } @@ -159,22 +213,35 @@ func applyRbac(ctx context.Context, rbacs []string, handler resourceHandler) err func ApplyClusterRoleBindings(ctx context.Context, rbacs []string, kubeconfigPath string) error { crb := &clusterRoleBindingApplier{} crb.New(kubeconfigPath) - return applyRbac(ctx, rbacs, crb) + return handleRbac(ctx, rbacs, crb) +} + +func DeleteClusterRoleBindings(ctx context.Context, rbacs []string, kubeconfigPath string) error { + crb := &clusterRoleBindingDeleter{} + crb.New(kubeconfigPath) + return handleRbac(ctx, rbacs, crb) } func ApplyClusterRoles(ctx context.Context, rbacs []string, kubeconfigPath string) error { cr := &clusterRoleApplier{} cr.New(kubeconfigPath) - return applyRbac(ctx, rbacs, cr) + return handleRbac(ctx, rbacs, cr) +} + +func DeleteClusterRoles(ctx context.Context, rbacs []string, kubeconfigPath string) error { + cr := &clusterRoleDeleter{} + cr.New(kubeconfigPath) + return handleRbac(ctx, rbacs, cr) } + func ApplyRoleBindings(ctx context.Context, rbacs []string, kubeconfigPath string) error { rb := &roleBindingApplier{} rb.New(kubeconfigPath) - return applyRbac(ctx, rbacs, rb) + return handleRbac(ctx, rbacs, rb) } func ApplyRoles(ctx context.Context, rbacs []string, kubeconfigPath string) error { r := &roleApplier{} r.New(kubeconfigPath) - return applyRbac(ctx, rbacs, r) + return handleRbac(ctx, rbacs, r) } diff --git a/pkg/components/controllers.go b/pkg/components/controllers.go index 9aab63f881..4a16ac30ce 100644 
--- a/pkg/components/controllers.go +++ b/pkg/components/controllers.go @@ -130,6 +130,14 @@ func startIngressController(ctx context.Context, cfg *config.Config, kubeconfigP ) if cfg.Ingress.Status == config.StatusDisabled { + if err := assets.DeleteClusterRoleBindings(ctx, clusterRoleBinding, kubeconfigPath); err != nil { + klog.Warningf("Failed to delete cluster role bindings %v: %v", clusterRoleBinding, err) + return err + } + if err := assets.DeleteClusterRoles(ctx, clusterRole, kubeconfigPath); err != nil { + klog.Warningf("Failed to delete cluster roles %v: %v", clusterRole, err) + return err + } if err := assets.DeleteNamespaces(ctx, ns, kubeconfigPath); err != nil { klog.Warningf("Failed to delete namespaces %v: %v", ns, err) return err From 4db9305077d24bb50af940391bdce7c55278d338 Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Fri, 1 Mar 2024 17:54:42 +0100 Subject: [PATCH 07/15] USHIFT-2444: Add e2e tests --- pkg/config/config.go | 22 ++++---- test/suites/standard1/router.robot | 91 +++++++++++++++++++++++++++--- 2 files changed, 92 insertions(+), 21 deletions(-) diff --git a/pkg/config/config.go b/pkg/config/config.go index 4780ff482f..f0e991e207 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -191,13 +191,12 @@ func (c *Config) incorporateUserSettings(u *Config) { c.Manifests.KustomizePaths = u.Manifests.KustomizePaths } -<<<<<<< HEAD - if len(u.Ingress.AdmissionPolicy.NamespaceOwnership) != 0 { - c.Ingress.AdmissionPolicy.NamespaceOwnership = u.Ingress.AdmissionPolicy.NamespaceOwnership -======= if len(u.Ingress.Status) != 0 { c.Ingress.Status = u.Ingress.Status ->>>>>>> 9e6515a2d (USHIFT-2444: Add ingress status config parameter) + } + + if len(u.Ingress.AdmissionPolicy.NamespaceOwnership) != 0 { + c.Ingress.AdmissionPolicy.NamespaceOwnership = u.Ingress.AdmissionPolicy.NamespaceOwnership } } @@ -301,17 +300,16 @@ func (c *Config) validate() error { } } -<<<<<<< HEAD - switch c.Ingress.AdmissionPolicy.NamespaceOwnership 
{ - case NamespaceOwnershipAllowed, NamespaceOwnershipStrict: - default: - return fmt.Errorf("unsupported namespaceOwnership value %v", c.Ingress.AdmissionPolicy.NamespaceOwnership) -======= switch c.Ingress.Status { case StatusEnabled, StatusDisabled: default: return fmt.Errorf("unsupported ingress.status value %v", c.Ingress.Status) ->>>>>>> 9e6515a2d (USHIFT-2444: Add ingress status config parameter) + } + + switch c.Ingress.AdmissionPolicy.NamespaceOwnership { + case NamespaceOwnershipAllowed, NamespaceOwnershipStrict: + default: + return fmt.Errorf("unsupported namespaceOwnership value %v", c.Ingress.AdmissionPolicy.NamespaceOwnership) } return nil diff --git a/test/suites/standard1/router.robot b/test/suites/standard1/router.robot index 88d83666fb..cb17b2f898 100644 --- a/test/suites/standard1/router.robot +++ b/test/suites/standard1/router.robot @@ -1,5 +1,5 @@ *** Settings *** -Documentation Router configuration tests +Documentation Router tests Resource ../../resources/common.resource Resource ../../resources/oc.resource @@ -17,14 +17,24 @@ Test Tags restart slow ${NS_OWNERSHIP_1} ${EMPTY} ${NS_OWNERSHIP_2} ${EMPTY} ${HOSTNAME} hello-microshift.cluster.local +${ROUTER_ENABLED} SEPARATOR=\n +... --- +... ingress: +... \ \ status: Enabled +${ROUTER_DISABLED} SEPARATOR=\n +... --- +... ingress: +... \ \ status: Disabled ${OWNERSHIP_ALLOW} SEPARATOR=\n ... --- ... ingress: +... \ \ status: Enabled ... \ \ routeAdmissionPolicy: ... \ \ \ \ namespaceOwnership: InterNamespaceAllowed ${OWNERSHIP_STRICT} SEPARATOR=\n ... --- ... ingress: +... \ \ status: Enabled ... \ \ routeAdmissionPolicy: ... \ \ \ \ namespaceOwnership: Strict @@ -36,7 +46,6 @@ Router Namespace Ownership Allowed [Setup] Run Keywords ... Save Default MicroShift Config ... Configure Namespace Ownership Allowed - ... Restart MicroShift ... Setup Namespaces ... Setup Hello MicroShift Pods In Multiple Namespaces ... 
Restart Router
@@ -58,7 +67,6 @@ Router Namespace Ownership Strict
     [Setup]    Run Keywords
     ...    Save Default MicroShift Config
     ...    Configure Namespace Ownership Strict
-    ...    Restart MicroShift
     ...    Setup Namespaces
     ...    Setup Hello MicroShift Pods In Multiple Namespaces
     ...    Restart Router
@@ -77,6 +85,37 @@ Router Namespace Ownership Strict
     ...    Restore Default MicroShift Config
     ...    Restart MicroShift

+Router Enabled
+    [Documentation]    Check the default configuration: the router is enabled, listens on the standard ports, and serves an exposed route.
+    [Setup]    Run Keywords
+    ...    Save Default MicroShift Config
+    ...    Enable Router
+    ...    Create Hello MicroShift Pod
+    ...    Expose Hello MicroShift Service Via Route
+    ...    Restart Router
+
+    Wait Until Keyword Succeeds    10x    6s
+    ...    Access Hello Microshift    ${HTTP_PORT}
+
+    [Teardown]    Run Keywords
+    ...    Delete Hello MicroShift Route
+    ...    Delete Hello MicroShift Pod And Service
+    ...    Wait For Service Deletion With Timeout
+    ...    Restore Default MicroShift Config
+    ...    Restart MicroShift
+
+Router Disabled
+    [Documentation]    Disable the router and check the namespace does not exist.
+    [Setup]    Run Keywords
+    ...    Save Default MicroShift Config
+    ...    Disable Router
+
+    Run With Kubeconfig    oc wait --for=delete namespace/openshift-ingress --timeout=60s
+
+    [Teardown]    Run Keywords
+    ...    Restore Default MicroShift Config
+    ...    Restart MicroShift
+

 *** Keywords ***
 Configure Namespace Ownership Allowed
@@ -87,11 +126,51 @@ Configure Namespace Ownership Strict
     [Documentation]    Configure MicroShift to use Strict namespace ownership
     Setup With Custom Config    ${OWNERSHIP_STRICT}

+Restart Router
+    [Documentation]    Restart the router and wait for readiness again. The router is sensitive to apiserver
+    ...    downtime and might need a restart (after the apiserver is ready) to resync all the routes.
+    Run With Kubeconfig    oc rollout restart deployment router-default -n openshift-ingress
+    Named Deployment Should Be Available    router-default    openshift-ingress    5m
+
+Expose Hello MicroShift Service Via Route
+    [Documentation]    Expose the "hello microshift" application through the Route
+    Oc Expose    pod hello-microshift -n ${NAMESPACE}
+    Oc Expose    svc hello-microshift --hostname hello-microshift.cluster.local -n ${NAMESPACE}
+
+Delete Hello MicroShift Route
+    [Documentation]    Delete route for cleanup.
+    Oc Delete    route/hello-microshift -n ${NAMESPACE}
+
+Wait For Service Deletion With Timeout
+    [Documentation]    Polls for service and endpoint by "app=hello-microshift" label. Fails if timeout
+    ...    expires. This check is unique to this test suite because each test here reuses the same namespace. Since
+    ...    the tests reuse the service name, a small race window exists between the teardown of one test and the setup
+    ...    of the next. This produces flaky failures when the service or endpoint names collide.
+    Wait Until Keyword Succeeds    30s    1s
+    ...    Network APIs With Test Label Are Gone
+
+Network APIs With Test Label Are Gone
+    [Documentation]    Check for service and endpoint by "app=hello-microshift" label. Succeeds if the response matches
+    ...    "No resources found in namespace." Fails if not.
+    ${match_string}=    Catenate    No resources found in ${NAMESPACE} namespace.
+ ${match_string}= Remove String ${match_string} " + ${response}= Run With Kubeconfig oc get svc,ep -l app\=hello-microshift -n ${NAMESPACE} + Should Be Equal As Strings ${match_string} ${response} strip_spaces=True + +Disable Router + [Documentation] Disable router + Setup With Custom Config ${ROUTER_DISABLED} + +Enable Router + [Documentation] Disable router + Setup With Custom Config ${ROUTER_ENABLED} + Setup With Custom Config [Documentation] Install a custom config and restart MicroShift [Arguments] ${config_content} ${merged}= Extend MicroShift Config ${config_content} Upload MicroShift Config ${merged} + Restart MicroShift Setup Namespaces [Documentation] Configure the required namespaces for namespace ownership tests @@ -113,9 +192,3 @@ Setup Hello MicroShift Pods In Multiple Namespaces Expose Hello MicroShift ${NS_OWNERSHIP_2} Oc Expose svc hello-microshift --hostname ${HOSTNAME} --path /${NS_OWNERSHIP_1} -n ${NS_OWNERSHIP_1} Oc Expose svc hello-microshift --hostname ${HOSTNAME} --path /${NS_OWNERSHIP_2} -n ${NS_OWNERSHIP_2} - -Restart Router - [Documentation] Restart the router and wait for readiness again. The router is sensitive to apiserver - ... downtime and might need a restart (after the apiserver is ready) to resync all the routes. 
- Run With Kubeconfig oc rollout restart deployment router-default -n openshift-ingress - Named Deployment Should Be Available router-default openshift-ingress 5m From 0e382fdbaa7bd0fe3dc5ea670cc5df19e8c18a8b Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Mon, 4 Mar 2024 15:42:17 +0100 Subject: [PATCH 08/15] USHIFT-2444: Reduce termination grace period seconds for router-default --- assets/components/openshift-router/deployment.yaml | 2 +- scripts/auto-rebase/rebase.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/assets/components/openshift-router/deployment.yaml b/assets/components/openshift-router/deployment.yaml index d9506d3c2f..d3c07c04d1 100644 --- a/assets/components/openshift-router/deployment.yaml +++ b/assets/components/openshift-router/deployment.yaml @@ -116,7 +116,7 @@ spec: optional: false defaultMode: 420 restartPolicy: Always - terminationGracePeriodSeconds: 3600 + terminationGracePeriodSeconds: 10 dnsPolicy: ClusterFirst nodeSelector: kubernetes.io/os: linux diff --git a/scripts/auto-rebase/rebase.sh b/scripts/auto-rebase/rebase.sh index 072702e51d..346408a2ec 100755 --- a/scripts/auto-rebase/rebase.sh +++ b/scripts/auto-rebase/rebase.sh @@ -699,7 +699,7 @@ EOF yq -i '.spec.template.spec.containers[0].ports += {"name": "metrics", "containerPort": 1936, "protocol": "TCP"}' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.containers[0].args = ["-v=4"]' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.restartPolicy = "Always"' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml - yq -i '.spec.template.spec.terminationGracePeriodSeconds = 3600' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml + yq -i '.spec.template.spec.terminationGracePeriodSeconds = 10' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.dnsPolicy = "ClusterFirst"' 
"${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.nodeSelector = {"kubernetes.io/os": "linux", "node-role.kubernetes.io/worker": ""}' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.serviceAccount = "router"' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml From ecd577aa081c7712916e3d5d6105263c681e71ae Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Mon, 4 Mar 2024 17:11:21 +0100 Subject: [PATCH 09/15] USHIFT-2444: Use generics in assets package --- pkg/assets/core.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/assets/core.go b/pkg/assets/core.go index 110cc962e6..41b7860703 100644 --- a/pkg/assets/core.go +++ b/pkg/assets/core.go @@ -37,7 +37,7 @@ func coreClient(kubeconfigPath string) *coreclientv1.CoreV1Client { return coreclientv1.NewForConfigOrDie(rest.AddUserAgent(restConfig, "core-agent")) } -func readCore(objBytes []byte, render RenderFunc, params RenderParams) runtime.Object { +func readCore[T any](objBytes []byte, render RenderFunc, params RenderParams) T { var err error if render != nil { objBytes, err = render(objBytes, params) @@ -49,7 +49,7 @@ func readCore(objBytes []byte, render RenderFunc, params RenderParams) runtime.O if err != nil { panic(err) } - return obj + return obj.(T) } type nsApplier struct { @@ -58,7 +58,7 @@ type nsApplier struct { } func (ns *nsApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { - ns.ns = readCore(objBytes, render, params).(*corev1.Namespace) + ns.ns = readCore[*corev1.Namespace](objBytes, render, params) } func (ns *nsApplier) Handle(ctx context.Context) error { @@ -72,7 +72,7 @@ type nsDeleter struct { } func (ns *nsDeleter) Read(objBytes []byte, render RenderFunc, params RenderParams) { - ns.ns = readCore(objBytes, render, params).(*corev1.Namespace) + ns.ns = readCore[*corev1.Namespace](objBytes, render, params) } func (ns *nsDeleter) 
Handle(ctx context.Context) error { @@ -86,7 +86,7 @@ type secretApplier struct { } func (secret *secretApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { - secret.secret = readCore(objBytes, render, params).(*corev1.Secret) + secret.secret = readCore[*corev1.Secret](objBytes, render, params) } func (secret *secretApplier) Handle(ctx context.Context) error { @@ -100,7 +100,7 @@ type svcApplier struct { } func (svc *svcApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { - svc.svc = readCore(objBytes, render, params).(*corev1.Service) + svc.svc = readCore[*corev1.Service](objBytes, render, params) } func (svc *svcApplier) Handle(ctx context.Context) error { @@ -114,7 +114,7 @@ type saApplier struct { } func (sa *saApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { - sa.sa = readCore(objBytes, render, params).(*corev1.ServiceAccount) + sa.sa = readCore[*corev1.ServiceAccount](objBytes, render, params) } func (sa *saApplier) Handle(ctx context.Context) error { @@ -128,7 +128,7 @@ type cmApplier struct { } func (cm *cmApplier) Read(objBytes []byte, render RenderFunc, params RenderParams) { - cm.cm = readCore(objBytes, render, params).(*corev1.ConfigMap) + cm.cm = readCore[*corev1.ConfigMap](objBytes, render, params) } func (cm *cmApplier) Handle(ctx context.Context) error { From 0e00d6089e7de3092b66b8f03a6283d199c8e6e2 Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Tue, 5 Mar 2024 11:22:04 +0100 Subject: [PATCH 10/15] USHIFT-2444: Allow generic interfaces in ireturn linter --- .golangci.yaml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.golangci.yaml b/.golangci.yaml index 75ac3b0fe9..1008151884 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -33,4 +33,13 @@ linters: - unparam - usestdlibvars - wastedassign - - whitespace \ No newline at end of file + - whitespace + +linters-settings: + ireturn: + allow: + - anon + - error + - empty + - stdlib + - generic 
\ No newline at end of file From 03d6d72be6b7fe40036b3ab39cf3a8fda6194860 Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Tue, 5 Mar 2024 12:27:39 +0100 Subject: [PATCH 11/15] USHIFT-2444: Update generated config --- cmd/generate-config/config/config-openapi-spec.json | 8 +++++++- docs/user/howto_config.md | 2 ++ packaging/microshift/config.yaml | 2 ++ pkg/config/ingress.go | 2 ++ 4 files changed, 13 insertions(+), 1 deletion(-) diff --git a/cmd/generate-config/config/config-openapi-spec.json b/cmd/generate-config/config/config-openapi-spec.json index 74b31c1663..9b75d26287 100755 --- a/cmd/generate-config/config/config-openapi-spec.json +++ b/cmd/generate-config/config/config-openapi-spec.json @@ -73,7 +73,8 @@ "ingress": { "type": "object", "required": [ - "routeAdmissionPolicy" + "routeAdmissionPolicy", + "status" ], "properties": { "routeAdmissionPolicy": { @@ -88,6 +89,11 @@ "default": "InterNamespaceAllowed" } } + }, + "status": { + "description": "Default router status, can be Enabled or Disabled.", + "type": "string", + "default": "Enabled" } } }, diff --git a/docs/user/howto_config.md b/docs/user/howto_config.md index 97e0f3566a..2701e8320c 100644 --- a/docs/user/howto_config.md +++ b/docs/user/howto_config.md @@ -20,6 +20,7 @@ etcd: ingress: routeAdmissionPolicy: namespaceOwnership: "" + status: "" manifests: kustomizePaths: - "" @@ -59,6 +60,7 @@ etcd: ingress: routeAdmissionPolicy: namespaceOwnership: InterNamespaceAllowed + status: Enabled manifests: kustomizePaths: - /usr/lib/microshift/manifests diff --git a/packaging/microshift/config.yaml b/packaging/microshift/config.yaml index d46a341453..becda99e2c 100644 --- a/packaging/microshift/config.yaml +++ b/packaging/microshift/config.yaml @@ -25,6 +25,8 @@ ingress: # - InterNamespaceAllowed: Allow routes to claim different paths of the same host name across namespaces. # If empty, the default is InterNamespaceAllowed. 
namespaceOwnership: InterNamespaceAllowed + # Default router status, can be Enabled or Disabled. + status: Enabled manifests: # The locations on the filesystem to scan for kustomization files to use to load manifests. Set to a list of paths to scan only those paths. Set to an empty list to disable loading manifests. The entries in the list can be glob patterns to match multiple subdirectories. kustomizePaths: diff --git a/pkg/config/ingress.go b/pkg/config/ingress.go index 92a4175c35..c589784d0e 100644 --- a/pkg/config/ingress.go +++ b/pkg/config/ingress.go @@ -11,6 +11,8 @@ type NamespaceOwnershipEnum string type IngressStatusEnum string type IngressConfig struct { + // Default router status, can be Enabled or Disabled. + // +kubebuilder:default=Enabled Status IngressStatusEnum `json:"status"` AdmissionPolicy RouteAdmissionPolicy `json:"routeAdmissionPolicy"` ServingCertificate []byte `json:"-"` From fc70e254ce4ee52b229576068e2c31ebf51a6d1e Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Tue, 12 Mar 2024 13:53:55 +0100 Subject: [PATCH 12/15] USHIFT-2444: Remove termination grace period from router deployment --- assets/components/openshift-router/deployment.yaml | 1 - scripts/auto-rebase/rebase.sh | 1 - 2 files changed, 2 deletions(-) diff --git a/assets/components/openshift-router/deployment.yaml b/assets/components/openshift-router/deployment.yaml index d3c07c04d1..ae0279652c 100644 --- a/assets/components/openshift-router/deployment.yaml +++ b/assets/components/openshift-router/deployment.yaml @@ -116,7 +116,6 @@ spec: optional: false defaultMode: 420 restartPolicy: Always - terminationGracePeriodSeconds: 10 dnsPolicy: ClusterFirst nodeSelector: kubernetes.io/os: linux diff --git a/scripts/auto-rebase/rebase.sh b/scripts/auto-rebase/rebase.sh index 346408a2ec..e5ec1cd955 100755 --- a/scripts/auto-rebase/rebase.sh +++ b/scripts/auto-rebase/rebase.sh @@ -699,7 +699,6 @@ EOF yq -i '.spec.template.spec.containers[0].ports += {"name": "metrics", 
"containerPort": 1936, "protocol": "TCP"}' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.containers[0].args = ["-v=4"]' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.restartPolicy = "Always"' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml - yq -i '.spec.template.spec.terminationGracePeriodSeconds = 10' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.dnsPolicy = "ClusterFirst"' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.nodeSelector = {"kubernetes.io/os": "linux", "node-role.kubernetes.io/worker": ""}' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml yq -i '.spec.template.spec.serviceAccount = "router"' "${REPOROOT}"/assets/components/openshift-router/deployment.yaml From 17446d913aa5ff78eeb6f0f0a48233c5fff751c3 Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Tue, 12 Mar 2024 16:56:16 +0100 Subject: [PATCH 13/15] USHIFT-2444: Add type to ingress constants --- pkg/config/ingress.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/config/ingress.go b/pkg/config/ingress.go index c589784d0e..61b2e72d11 100644 --- a/pkg/config/ingress.go +++ b/pkg/config/ingress.go @@ -1,10 +1,10 @@ package config const ( - NamespaceOwnershipStrict = "Strict" - NamespaceOwnershipAllowed = "InterNamespaceAllowed" - StatusEnabled = "Enabled" - StatusDisabled = "Disabled" + NamespaceOwnershipStrict NamespaceOwnershipEnum = "Strict" + NamespaceOwnershipAllowed NamespaceOwnershipEnum = "InterNamespaceAllowed" + StatusEnabled IngressStatusEnum = "Enabled" + StatusDisabled IngressStatusEnum = "Disabled" ) type NamespaceOwnershipEnum string From 59c8c1834bb744a83c6d53c2cbb06e07e4450e9b Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Wed, 13 Mar 2024 13:08:10 +0100 Subject: [PATCH 14/15] USHIFT-2444: Add unit tests --- 
pkg/config/config_test.go | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index fabce508ee..e4b2defcff 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -244,6 +244,30 @@ func TestGetActiveConfigFromYAML(t *testing.T) { return c }(), }, + { + name: "router-enable", + config: dedent(` + ingress: + status: Enabled + `), + expected: func() *Config { + c := mkDefaultConfig() + c.Ingress.Status = StatusEnabled + return c + }(), + }, + { + name: "router-disable", + config: dedent(` + ingress: + status: Disabled + `), + expected: func() *Config { + c := mkDefaultConfig() + c.Ingress.Status = StatusDisabled + return c + }(), + }, } for _, tt := range ttests { From 71b5d53f627f207f9f15cc66588c8d7c94e33d58 Mon Sep 17 00:00:00 2001 From: Pablo Acevedo Montserrat Date: Tue, 2 Apr 2024 12:42:39 +0200 Subject: [PATCH 15/15] USHIFT-2444: Update status values to Managed/Removed --- .../config/config-openapi-spec.json | 4 ++-- docs/user/howto_config.md | 2 +- packaging/microshift/config.yaml | 4 ++-- pkg/components/controllers.go | 2 +- pkg/config/config.go | 4 ++-- pkg/config/config_test.go | 21 +++++++++++++------ pkg/config/ingress.go | 8 +++---- test/suites/standard1/router.robot | 16 +++++++------- 8 files changed, 35 insertions(+), 26 deletions(-) diff --git a/cmd/generate-config/config/config-openapi-spec.json b/cmd/generate-config/config/config-openapi-spec.json index 9b75d26287..d9b714befe 100755 --- a/cmd/generate-config/config/config-openapi-spec.json +++ b/cmd/generate-config/config/config-openapi-spec.json @@ -91,9 +91,9 @@ } }, "status": { - "description": "Default router status, can be Enabled or Disabled.", + "description": "Default router status, can be Managed or Removed.", "type": "string", - "default": "Enabled" + "default": "Managed" } } }, diff --git a/docs/user/howto_config.md b/docs/user/howto_config.md index 2701e8320c..7c7a50c805 100644 --- 
a/docs/user/howto_config.md +++ b/docs/user/howto_config.md @@ -60,7 +60,7 @@ etcd: ingress: routeAdmissionPolicy: namespaceOwnership: InterNamespaceAllowed - status: Enabled + status: Managed manifests: kustomizePaths: - /usr/lib/microshift/manifests diff --git a/packaging/microshift/config.yaml b/packaging/microshift/config.yaml index becda99e2c..9c81a783a3 100644 --- a/packaging/microshift/config.yaml +++ b/packaging/microshift/config.yaml @@ -25,8 +25,8 @@ ingress: # - InterNamespaceAllowed: Allow routes to claim different paths of the same host name across namespaces. # If empty, the default is InterNamespaceAllowed. namespaceOwnership: InterNamespaceAllowed - # Default router status, can be Enabled or Disabled. - status: Enabled + # Default router status, can be Managed or Removed. + status: Managed manifests: # The locations on the filesystem to scan for kustomization files to use to load manifests. Set to a list of paths to scan only those paths. Set to an empty list to disable loading manifests. The entries in the list can be glob patterns to match multiple subdirectories. 
kustomizePaths: diff --git a/pkg/components/controllers.go b/pkg/components/controllers.go index 4a16ac30ce..7bf2daec8a 100644 --- a/pkg/components/controllers.go +++ b/pkg/components/controllers.go @@ -129,7 +129,7 @@ func startIngressController(ctx context.Context, cfg *config.Config, kubeconfigP servingKeypairSecret = "components/openshift-router/serving-certificate.yaml" ) - if cfg.Ingress.Status == config.StatusDisabled { + if cfg.Ingress.Status == config.StatusRemoved { if err := assets.DeleteClusterRoleBindings(ctx, clusterRoleBinding, kubeconfigPath); err != nil { klog.Warningf("Failed to delete cluster role bindings %v: %v", clusterRoleBinding, err) return err diff --git a/pkg/config/config.go b/pkg/config/config.go index f0e991e207..664353fff3 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -117,7 +117,7 @@ func (c *Config) fillDefaults() error { }, } c.Ingress = IngressConfig{ - Status: StatusEnabled, + Status: StatusManaged, AdmissionPolicy: RouteAdmissionPolicy{ NamespaceOwnership: NamespaceOwnershipAllowed, }, @@ -301,7 +301,7 @@ func (c *Config) validate() error { } switch c.Ingress.Status { - case StatusEnabled, StatusDisabled: + case StatusManaged, StatusRemoved: default: return fmt.Errorf("unsupported ingress.status value %v", c.Ingress.Status) } diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index e4b2defcff..127852f1f5 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -245,26 +245,26 @@ func TestGetActiveConfigFromYAML(t *testing.T) { }(), }, { - name: "router-enable", + name: "router-managed", config: dedent(` ingress: - status: Enabled + status: Managed `), expected: func() *Config { c := mkDefaultConfig() - c.Ingress.Status = StatusEnabled + c.Ingress.Status = StatusManaged return c }(), }, { - name: "router-disable", + name: "router-removed", config: dedent(` ingress: - status: Disabled + status: Removed `), expected: func() *Config { c := mkDefaultConfig() - c.Ingress.Status = 
StatusDisabled + c.Ingress.Status = StatusRemoved return c }(), }, @@ -374,6 +374,15 @@ func TestValidate(t *testing.T) { }(), expectErr: true, }, + { + name: "router-status-invalid", + config: func() *Config { + c := mkDefaultConfig() + c.Ingress.Status = "invalid" + return c + }(), + expectErr: true, + }, } for _, tt := range ttests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/config/ingress.go b/pkg/config/ingress.go index 61b2e72d11..96aebefc37 100644 --- a/pkg/config/ingress.go +++ b/pkg/config/ingress.go @@ -3,16 +3,16 @@ package config const ( NamespaceOwnershipStrict NamespaceOwnershipEnum = "Strict" NamespaceOwnershipAllowed NamespaceOwnershipEnum = "InterNamespaceAllowed" - StatusEnabled IngressStatusEnum = "Enabled" - StatusDisabled IngressStatusEnum = "Disabled" + StatusManaged IngressStatusEnum = "Managed" + StatusRemoved IngressStatusEnum = "Removed" ) type NamespaceOwnershipEnum string type IngressStatusEnum string type IngressConfig struct { - // Default router status, can be Enabled or Disabled. - // +kubebuilder:default=Enabled + // Default router status, can be Managed or Removed. + // +kubebuilder:default=Managed Status IngressStatusEnum `json:"status"` AdmissionPolicy RouteAdmissionPolicy `json:"routeAdmissionPolicy"` ServingCertificate []byte `json:"-"` diff --git a/test/suites/standard1/router.robot b/test/suites/standard1/router.robot index cb17b2f898..01c6668aaf 100644 --- a/test/suites/standard1/router.robot +++ b/test/suites/standard1/router.robot @@ -17,24 +17,24 @@ Test Tags restart slow ${NS_OWNERSHIP_1} ${EMPTY} ${NS_OWNERSHIP_2} ${EMPTY} ${HOSTNAME} hello-microshift.cluster.local -${ROUTER_ENABLED} SEPARATOR=\n +${ROUTER_MANAGED} SEPARATOR=\n ... --- ... ingress: -... \ \ status: Enabled -${ROUTER_DISABLED} SEPARATOR=\n +... \ \ status: Managed +${ROUTER_REMOVED} SEPARATOR=\n ... --- ... ingress: -... \ \ status: Disabled +... \ \ status: Removed ${OWNERSHIP_ALLOW} SEPARATOR=\n ... --- ... ingress: -... 
\ \ status: Enabled +... \ \ status: Managed ... \ \ routeAdmissionPolicy: ... \ \ \ \ namespaceOwnership: InterNamespaceAllowed ${OWNERSHIP_STRICT} SEPARATOR=\n ... --- ... ingress: -... \ \ status: Enabled +... \ \ status: Managed ... \ \ routeAdmissionPolicy: ... \ \ \ \ namespaceOwnership: Strict @@ -159,11 +159,11 @@ Network APIs With Test Label Are Gone Disable Router [Documentation] Disable router - Setup With Custom Config ${ROUTER_DISABLED} + Setup With Custom Config ${ROUTER_REMOVED} Enable Router [Documentation] Disable router - Setup With Custom Config ${ROUTER_ENABLED} + Setup With Custom Config ${ROUTER_MANAGED} Setup With Custom Config [Documentation] Install a custom config and restart MicroShift